/* This file is no longer automatically generated from libc.  */

#define _MALLOC_INTERNAL

/* The malloc headers and source files from the C library follow here.  */

/* Declarations for `malloc' and friends.
   Copyright 1990, 91, 92, 93, 95, 96 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_H

#define _MALLOC_H 1

#ifdef _MALLOC_INTERNAL

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#if defined (__cplusplus) || (defined (__STDC__) && __STDC__)
#undef __P
#define __P(args) args
#undef __ptr_t
#define __ptr_t void *
#else /* Not C++ or ANSI C.  */
#undef __P
#define __P(args) ()
#undef const
#define const
#undef __ptr_t
#define __ptr_t char *
#endif /* C++ or ANSI C.  */

#if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG)
#include <string.h>
#else
#ifndef memset
#define memset(s, zero, n) bzero ((s), (n))
#endif
#ifndef memcpy
#define memcpy(d, s, n) bcopy ((s), (d), (n))
#endif
#endif

#if defined (__GNU_LIBRARY__) || (defined (__STDC__) && __STDC__)
#include <limits.h>
#else
#ifndef CHAR_BIT
#define CHAR_BIT 8
#endif
#endif

#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

#endif /* _MALLOC_INTERNAL.  */


#ifdef __cplusplus
extern "C"
{
#endif

#if defined (__STDC__) && __STDC__
#include <stddef.h>
#define __malloc_size_t size_t
#define __malloc_ptrdiff_t ptrdiff_t
#else
#define __malloc_size_t unsigned int
#define __malloc_ptrdiff_t int
#endif

#ifndef NULL
#define NULL 0
#endif


/* Allocate SIZE bytes of memory.  */
extern __ptr_t malloc __P ((__malloc_size_t __size));
/* Re-allocate the previously allocated block
   in __ptr_t, making the new block SIZE bytes long.  */
extern __ptr_t realloc __P ((__ptr_t __ptr, __malloc_size_t __size));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern __ptr_t calloc __P ((__malloc_size_t __nmemb, __malloc_size_t __size));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free __P ((__ptr_t __ptr));

/* Allocate SIZE bytes on a boundary that is a multiple of ALIGNMENT.  */
#if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict.  */
extern __ptr_t memalign __P ((__malloc_size_t __alignment,
                              __malloc_size_t __size));
#endif

/* Allocate SIZE bytes on a page boundary.  */
#if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC))
extern __ptr_t valloc __P ((__malloc_size_t __size));
#endif


#ifdef _MALLOC_INTERNAL

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT		(CHAR_BIT * sizeof(int))
#define BLOCKLOG	(INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE	(1 << BLOCKLOG)
#define BLOCKIFY(SIZE)	(((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
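
/* Worked example (editorial note, not in the original source): on a
   host where int is 32 bits, INT_BIT is 32, so BLOCKLOG is 12 and
   BLOCKSIZE is 4096 bytes.  BLOCKIFY rounds a byte count up to whole
   blocks: BLOCKIFY (1) == 1, BLOCKIFY (4096) == 1, and
   BLOCKIFY (10000) == 3, since 10000 + 4095 == 14095 and
   14095 / 4096 == 3.  */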

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP	(INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS	8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
	/* Zero for a large (multiblock) object, or positive giving the
	   logarithm to the base two of the fragment size.  */
	int type;
	union
	  {
	    struct
	      {
		__malloc_size_t nfree; /* Free frags in a fragmented block.  */
		__malloc_size_t first; /* First free fragment of the block.  */
	      } frag;
	    /* For a large object, in its first block, this has the number
	       of blocks in the object.  In the other blocks, this has a
	       negative number which says how far back the first block is.  */
	    __malloc_ptrdiff_t size;
	  } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
	__malloc_size_t size;	/* Size (in blocks) of a free cluster.  */
	__malloc_size_t next;	/* Index of next free cluster.  */
	__malloc_size_t prev;	/* Index of previous free cluster.  */
      } free;
  } malloc_info;
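
/* Illustrative sketch (editorial): a 3-block large object starting at
   block B is described as

     _heapinfo[B].busy.type = 0;
     _heapinfo[B].busy.info.size = 3;
     _heapinfo[B + 1].busy.info.size = -1;
     _heapinfo[B + 2].busy.info.size = -2;

   so from any interior block, adding busy.info.size to the block index
   recovers the first block of the object.  */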

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)	(((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)	((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase))
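
/* Worked example (editorial): block numbers are 1-based, so the block
   starting at _heapbase itself is block 1.  With BLOCKSIZE == 4096,
   the address _heapbase + 8192 maps to BLOCK == 8192 / 4096 + 1 == 3,
   and ADDRESS (3) == _heapbase + 2 * 4096 maps straight back.  Index 0
   of _heapinfo never describes a real block; it anchors the circular
   free list.  */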

/* Current search index for the heap table.  */
extern __malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
extern __malloc_size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with `memalign' (or `valloc').  */
struct alignlist
  {
    struct alignlist *next;
    __ptr_t aligned;		/* The address that memalign returned.  */
    __ptr_t exact;		/* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern __malloc_size_t _chunks_used;
extern __malloc_size_t _bytes_used;
extern __malloc_size_t _chunks_free;
extern __malloc_size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern __ptr_t _malloc_internal __P ((__malloc_size_t __size));
extern __ptr_t _realloc_internal __P ((__ptr_t __ptr, __malloc_size_t __size));
extern void _free_internal __P ((__ptr_t __ptr));

#endif /* _MALLOC_INTERNAL.  */

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern __ptr_t malloc_find_object_address __P ((__ptr_t __ptr));

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern __ptr_t (*__morecore) __P ((__malloc_ptrdiff_t __size));

/* Default value of `__morecore'.  */
extern __ptr_t __default_morecore __P ((__malloc_ptrdiff_t __size));

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) __P ((void));

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern __malloc_size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize __P ((void));

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) __P ((void));
extern void (*__free_hook) __P ((__ptr_t __ptr));
extern __ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));
extern __ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));
extern __ptr_t (*__memalign_hook) __P ((__malloc_size_t __size,
                                        __malloc_size_t __alignment));

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,	/* Consistency checking is not turned on.  */
    MCHECK_OK,			/* Block is fine.  */
    MCHECK_FREE,		/* Block freed twice.  */
    MCHECK_HEAD,		/* Memory before the block was clobbered.  */
    MCHECK_TAIL			/* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck __P ((void (*__abortfunc) __P ((enum mcheck_status))));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe __P ((__ptr_t __ptr));
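
/* Usage sketch (editorial): a program wanting these checks calls
   `mcheck' before its first allocation, e.g.

     mcheck (NULL);          -- NULL selects the standard abort handler
     p = malloc (40);
     status = mprobe (p);    -- expect MCHECK_OK on an intact block

   Per the comment above, `mcheck' must precede the first `malloc'
   for the hooks to take effect.  */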

/* Activate a standard collection of tracing hooks.  */
extern void mtrace __P ((void));
extern void muntrace __P ((void));

/* Statistics available to the user.  */
struct mstats
  {
    __malloc_size_t bytes_total;  /* Total size of the heap.  */
    __malloc_size_t chunks_used;  /* Chunks allocated by the user.  */
    __malloc_size_t bytes_used;   /* Byte total of user-allocated chunks.  */
    __malloc_size_t chunks_free;  /* Chunks in the free list.  */
    __malloc_size_t bytes_free;   /* Byte total of chunks in the free list.  */
  };

/* Pick up the current statistics.  */
extern struct mstats mstats __P ((void));

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings __P ((__ptr_t __start,
                                  void (*__warnfun) __P ((const char *))));


/* Relocating allocator.  */

/* Allocate SIZE bytes, and store the address in *HANDLEPTR.  */
extern __ptr_t r_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));

/* Free the storage allocated in HANDLEPTR.  */
extern void r_alloc_free __P ((__ptr_t *__handleptr));

/* Adjust the block at HANDLEPTR to be SIZE bytes long.  */
extern __ptr_t r_re_alloc __P ((__ptr_t *__handleptr, __malloc_size_t __size));


#ifdef __cplusplus
}
#endif

#endif /* malloc.h  */
/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif
#include <errno.h>

/* How to really get more memory.  */
__ptr_t (*__morecore) __P ((ptrdiff_t __size)) = __default_morecore;

/* Debugging hook for `malloc'.  */
__ptr_t (*__malloc_hook) __P ((__malloc_size_t __size));

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/_free_internal
   (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static __malloc_size_t heapsize;

/* Search index in the info table.  */
__malloc_size_t _heapindex;

/* Limit of valid info table indices.  */
__malloc_size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
__malloc_size_t _chunks_used;
__malloc_size_t _bytes_used;
__malloc_size_t _chunks_free;
__malloc_size_t _bytes_free;

/* Nonzero once `malloc' has been initialized ("Are you experienced?").  */
int __malloc_initialized;

__malloc_size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) __P ((void));
void (*__after_morecore_hook) __P ((void));


/* Aligned allocation.  */
static __ptr_t align __P ((__malloc_size_t));
static __ptr_t
align (size)
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj;

  result = (*__morecore) (size);
  adj = (unsigned long int) ((unsigned long int) ((char *) result -
						  (char *) NULL)) % BLOCKSIZE;
  if (adj != 0)
    {
      __ptr_t new;
      adj = BLOCKSIZE - adj;
      new = (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
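
/* Worked example (editorial): if (*__morecore) returns an address
   whose offset within a 4096-byte block is 1040, then adj starts as
   1040, so align requests BLOCKSIZE - 1040 == 3056 further bytes
   purely as padding and returns the block-aligned address just past
   the original core.  The padding bytes are simply sacrificed.  */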

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static __ptr_t get_contiguous_space __P ((__malloc_ptrdiff_t, __ptr_t));
static __ptr_t
get_contiguous_space (size, position)
     __malloc_ptrdiff_t size;
     __ptr_t position;
{
  __ptr_t before;
  __ptr_t after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void register_heapinfo __P ((void));
#ifdef __GNUC__
__inline__
#endif
static void
register_heapinfo ()
{
  __malloc_size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}

/* Set everything up and remember that we have.  */
int
__malloc_initialize ()
{
  if (__malloc_initialized)
    return 0;

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return 0;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  return 1;
}
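
/* Worked example (editorial): with INT_BIT > 16, HEAP is 4194304 and
   BLOCKSIZE is 4096, so the initial table has 1024 entries.  If
   sizeof (malloc_info) is 12 (as on a typical 32-bit host), the table
   occupies 12288 bytes, i.e. BLOCKIFY (12288) == 3 blocks, which
   register_heapinfo then records in the table itself.  */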

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary.  */
static __ptr_t morecore __P ((__malloc_size_t));
static __ptr_t
morecore (size)
     __malloc_size_t size;
{
  __ptr_t result;
  malloc_info *newinfo, *oldinfo;
  __malloc_size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  /* Check if we need to grow the info table.  */
  if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
	 added blocks in the table itself, as we hope to place them in
	 existing free space, which is already covered by part of the
	 existing table.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
	 from realloc in the case of growing a large block, because the
	 block being grown is momentarily marked as free.  In this case
	 _heaplimit is zero so we know not to reuse space for internal
	 allocation.  */
      if (_heaplimit != 0)
	{
	  /* First try to allocate the new info table in core we already
	     have, in the usual way using realloc.  If realloc cannot
	     extend it in place or relocate it to existing sufficient core,
	     we will get called again, and the code above will notice the
	     `morecore_recursing' flag and return null.  */
	  int save = errno;	/* Don't want to clobber errno with ENOMEM.  */
	  morecore_recursing = 1;
	  newinfo = (malloc_info *) _realloc_internal
	    (_heapinfo, newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* We found some space in core, and realloc has put the old
		 table's blocks on the free list.  Now zero the new part
		 of the table and install the new table location.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Allocate new space for the malloc info table.  */
      while (1)
	{
	  newinfo = (malloc_info *) align (newsize * sizeof (malloc_info));

	  /* Did it fail?  */
	  if (newinfo == NULL)
	    {
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Is it big enough to record status for its own space?
	     If so, we win.  */
	  if ((__malloc_size_t) BLOCK ((char *) newinfo
				       + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Must try again.  First give back most of what we just got.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table to the beginning of the new,
	 and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
	 it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal (oldinfo);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
__ptr_t
_malloc_internal (size)
     __malloc_size_t size;
{
  __ptr_t result;
  __malloc_size_t block, blocks, lastblocks, start;
  register __malloc_size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if 0
  if (size == 0)
    return NULL;
#endif

  if (size < sizeof (struct list))
    size = sizeof (struct list);

#ifdef SUNOS_LOCALTIME_BUG
  if (size < 16)
    size = 16;
#endif

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
	 Determine the logarithm to base two of the fragment size.  */
      register __malloc_size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;
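
      /* Worked example (editorial): for a 100-byte request, --size
	 leaves 99, which halves through 49, 24, 12, 6, 3, 1 to 0, so
	 log ends at 7 and the fragment size is 1 << 7 == 128, the
	 smallest power of two that holds 100 bytes.  */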

      /* Look in the fragment lists for a
	 free fragment of the desired size.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* There are free fragments of this size.
	     Pop a fragment out of the fragment list and return it.
	     Update the block's nfree and first counters.  */
	  result = (__ptr_t) next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first = (unsigned long int)
	      ((unsigned long int) ((char *) next->next - (char *) NULL)
	       % BLOCKSIZE) >> log;

	  /* Update the statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments of the desired size, so get a new block
	     and break it into fragments, returning the first.  */
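	  /* Illustrative sketch (editorial): with BLOCKSIZE == 4096 and
	     log == 7, the fresh block is split into 32 fragments of 128
	     bytes.  Fragment 0 is returned to the caller; fragments 1
	     through 31 are threaded onto _fraghead[7], and nfree is
	     set to 31 below.  */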
	  result = malloc (BLOCKSIZE);
	  if (result == NULL)
	    return NULL;

	  /* Link all fragments but the first into the free list.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Initialize the nfree and first counters for this block.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large allocation to receive one or more blocks.
	 Search the free list in a circle starting at the last place visited.
	 If we loop completely around without finding a large enough
	 space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Need to get more from the system.  Get a little extra.  */
	      __malloc_size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;
	      /* Check to see if the new core will be contiguous with the
		 final free block; if so we don't need to get as much.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if we will have to make the heap info
		     table bigger to accommodate the new space.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* We got it contiguously.  Which block we are extending
		     (the `final free block' referred to above) might have
		     changed, if it got combined with a freed info table.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		return NULL;
	      block = BLOCK (result);
	      /* Put the new block at the end of the free list.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Now loop to use some of that block for this allocation.  */
	    }
	}

      /* At this point we have found a suitable free list entry.
	 Figure out how to remove what we need from the list.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* The block we found has a bit left over,
	     so relink the tail end back into the free list.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* The block exactly matches our requirements,
	     so just remove it from the list.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
	 first with a negative number so you can find the first block by
	 adding that adjustment.  */
      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  return result;
}

__ptr_t
malloc (size)
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
}
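
/* Usage sketch (editorial): a debugging wrapper can interpose on
   allocation by setting the hook, e.g.

     static __ptr_t
     my_hook (__malloc_size_t size)
     {
       __ptr_t result;
       __malloc_hook = NULL;             -- unhook to avoid recursion
       result = _malloc_internal (size); -- record the request here
       __malloc_hook = my_hook;
       return result;
     }

   `mtrace'-style tools work this way; `my_hook' is a hypothetical
   name, not part of this library.  */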

#ifndef _LIBC

/* On some ANSI C systems, some libc functions call _malloc, _free
   and _realloc.  Make them use the GNU functions.  */

__ptr_t
_malloc (size)
     __malloc_size_t size;
{
  return malloc (size);
}

void
_free (ptr)
     __ptr_t ptr;
{
  free (ptr);
}

__ptr_t
_realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  return realloc (ptr, size);
}

#endif
/* Free a block of memory allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif


/* Cope with systems lacking `memmove'.  */
#ifndef memmove
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))
#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#endif
/* This function is defined in realloc.c.  */
extern void __malloc_safe_bcopy __P ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif
#endif


/* Debugging hook for free.  */
void (*__free_hook) __P ((__ptr_t __ptr));

/* List of blocks allocated by memalign.  */
struct alignlist *_aligned_blocks = NULL;

/* Return memory to the heap.
   Like `free' but don't call a __free_hook if there is one.  */
void
_free_internal (ptr)
     __ptr_t ptr;
{
  int type;
  __malloc_size_t block, blocks;
  register __malloc_size_t i;
  struct list *prev, *next;
  __ptr_t curbrk;
  const __malloc_size_t lesscore_threshold
    /* Threshold of free space at which we will return some to the system.  */
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Mark the slot in the list as free.  */
	ptr = l->exact;
	break;
      }

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Get as many statistics as early as we can.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Find the free cluster previous to this one in the free list.
	 Start searching at the last block referenced; this may benefit
	 programs with locality of allocation.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Determine how to link this block into the free list.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  /* Coalesce this block with its predecessor.  */
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Really link this block back into the free list.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Now that the block is linked in, see if we can coalesce it
	 with its successor (by deleting its successor from the list
	 and adding in its size).  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}
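
      /* Worked example (editorial): suppose free clusters start at
	 blocks 3 (size 2) and 10, and we free the 2-block object at
	 block 5.  The predecessor search finds i == 3; since
	 3 + 2 == 5, the object merges into the cluster at 3, giving it
	 size 4.  Had the freed object started at block 6 instead, it
	 would have been linked in as a new cluster between 3 and 10.  */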

      /* How many trailing free blocks are there now?  */
      blocks = _heapinfo[block].free.size;

      /* Where is the current end of accessible core?  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The end of the malloc heap is at the end of accessible core.
	     It's possible that moving _heapinfo will allow us to
	     return some space to the system.  */

	  __malloc_size_t info_block = BLOCK (_heapinfo);
	  __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  __malloc_size_t prev_block = _heapinfo[block].free.prev;
	  __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size;
	  __malloc_size_t next_block = _heapinfo[block].free.next;
	  __malloc_size_t next_blocks = _heapinfo[next_block].free.size;

	  if (/* Win if this block being freed is last in core, the info table
		 is just before it, the previous free block is just before the
		 info table, and the two free blocks together form a useful
		 amount to return to the system.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Nope, not the case.  We can also win if this block being
		 freed is just before the info table, and the table extends
		 to the end of core or is followed only by a free block,
		 and the total free space is worth returning to the system.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      __malloc_size_t oldlimit = _heaplimit;

	      /* Free the old info table, clearing _heaplimit to avoid
		 recursion into this code.  We don't want to return the
		 table's blocks to the system before we have copied them to
		 the new location.  */
	      _heaplimit = 0;
	      _free_internal (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Tell malloc to search from the beginning of the heap for
		 free blocks, so it doesn't reuse the ones just freed.  */
	      _heapindex = 0;

	      /* Allocate new space for the info table and move its data.  */
	      newinfo = (malloc_info *) _malloc_internal (info_blocks
							  * BLOCKSIZE);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* We should now have coalesced the free block with the
		 blocks freed from the old info table.  Examine the entire
		 trailing free block to decide below whether to return some
		 to the system.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Now see if we can return stuff to the system.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register __malloc_size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Set the next search to begin at this block.  */
      _heapindex = block;
      break;

    default:
      /* Do some of the statistics.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Get the address of the first free fragment in this block.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* If all fragments of this block are free, remove them
	     from the fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep the statistics accurate.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

	  free (ADDRESS (block));
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* If some fragments of this block are free, link this
	     fragment into the fragment list after the first free
	     fragment of this block.  */
	  next = (struct list *) ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* No fragments of this block are free, so link this
	     fragment into the fragment list and announce that
	     it is the first free fragment of this block.  */
	  prev = (struct list *) ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first = (unsigned long int)
	    ((unsigned long int) ((char *) ptr - (char *) NULL)
	     % BLOCKSIZE >> type);
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }
}

/* Return memory to the heap.  */
void
free (ptr)
     __ptr_t ptr;
{
  if (__free_hook != NULL)
    (*__free_hook) (ptr);
  else
    _free_internal (ptr);
}

/* Define the `cfree' alias for `free'.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (ptr)
     __ptr_t ptr;
{
  free (ptr);
}
#endif
/* Change the size of a block allocated by `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif



/* Cope with systems lacking `memmove'.  */
#if (defined (MEMMOVE_MISSING) || \
     !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG))

#ifdef emacs
#undef __malloc_safe_bcopy
#define __malloc_safe_bcopy safe_bcopy
#else

/* Snarfed directly from Emacs src/dispnew.c:
   XXX Should use system bcopy if it handles overlap.  */

/* Like bcopy except never gets confused by overlap.  */

void
__malloc_safe_bcopy (afrom, ato, size)
     __ptr_t afrom;
     __ptr_t ato;
     __malloc_size_t size;
{
  char *from = afrom, *to = ato;

  if (size <= 0 || from == to)
    return;

  /* If the source and destination don't overlap, then bcopy can
     handle it.  If they do overlap, but the destination is lower in
     memory than the source, we'll assume bcopy can handle that.  */
  if (to < from || from + size <= to)
    bcopy (from, to, size);

  /* Otherwise, we'll copy from the end.  */
  else
    {
      register char *endf = from + size;
      register char *endt = to + size;

      /* If TO - FROM is large, then we should break the copy into
	 nonoverlapping chunks of TO - FROM bytes each.  However, if
	 TO - FROM is small, then the bcopy function call overhead
	 makes this not worth it.  The crossover point could be about
	 anywhere.  Since I don't think the obvious copy loop is too
	 bad, I'm trying to err in its favor.  */
      if (to - from < 64)
	{
	  do
	    *--endt = *--endf;
	  while (endf != from);
	}
      else
	{
	  for (;;)
	    {
	      endt -= (to - from);
	      endf -= (to - from);

	      if (endt < to)
		break;

	      bcopy (endf, endt, to - from);
	    }

	  /* If SIZE wasn't a multiple of TO - FROM, there will be a
	     little left over.  The amount left over is
	     (endt + (to - from)) - to, which is endt - from.  */
	  bcopy (from, to, endt - from);
	}
    }
}
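
/* Worked example (editorial): copying 1000 bytes from FROM to
   FROM + 100 overlaps with the destination above the source, and
   TO - FROM == 100 >= 64, so the chunked path runs: ten
   non-overlapping 100-byte chunks are bcopy'd back to front, then the
   final bcopy moves the (here zero-byte) remainder.  For offsets
   under 64 bytes, the simple backward byte loop is used instead.  */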
#endif /* emacs */

#ifndef memmove
extern void __malloc_safe_bcopy __P ((__ptr_t, __ptr_t, __malloc_size_t));
#define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size))
#endif

#endif


#define min(A, B) ((A) < (B) ? (A) : (B))

/* Debugging hook for realloc.  */
__ptr_t (*__realloc_hook) __P ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc.  */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal (ptr);
	      return result;
	    }
	}

      /* The new size is a large allocation as well;
	 see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* The new size is smaller; return
	     excess memory to the free list.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;
	  /* We have just created a new chunk by splitting a chunk in two.
	     Now we will free this chunk; increment the statistics counter
	     so it doesn't become wrong when _free_internal decrements it.  */
	  ++_chunks_used;
	  _free_internal (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* No size change necessary.  */
	result = ptr;
      else
	{
	  /* Won't fit, so allocate a new region that will.
	     Free the old region first in case there is sufficient
	     adjacent free space to grow without moving.  */
	  blocks = _heapinfo[block].busy.info.size;
	  /* Prevent free from actually returning memory to the system.  */
	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal (ptr);
	  result = _malloc_internal (size);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Now we're really in trouble.  We have to unfree
		 the thing we just freed.  Unfortunately it might
		 have been coalesced with its neighbors.  */
	      if (_heapindex == block)
		(void) _malloc_internal (blocks * BLOCKSIZE);
	      else
		{
		  __ptr_t previous
		    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal (blocks * BLOCKSIZE);
		  _free_internal (previous);
		}
	      return NULL;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* Old size is a fragment; type is logarithm
	 to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
	  size <= (__malloc_size_t) (1 << type))
	/* The new size is the same kind of fragment.  */
	result = ptr;
      else
	{
	  /* The new size is different; allocate a new space,
	     and copy the lesser of the new size and the old.  */
	  result = _malloc_internal (size);
	  if (result == NULL)
	    return NULL;
	  memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
	  _free_internal (ptr);
	}
      break;
    }

  return result;
}

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
    (ptr, size);
}
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk __P ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
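
/* Usage sketch (editorial): an embedder can point `__morecore' at its
   own break-style allocator instead of sbrk, e.g.

     static char arena[262144], *brk_ptr = arena;
     static __ptr_t
     my_morecore (__malloc_ptrdiff_t increment)
     {
       char *old = brk_ptr;
       brk_ptr += increment;
       return old;
     }
     ...
     __morecore = my_morecore;

   provided the function honors the contract stated earlier: successive
   calls return contiguous memory, a zero increment reports the current
   break, and a negative increment shrinks it.  The names here are
   hypothetical, and a real version would also check for arena
   overflow.  */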
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) __P ((size_t __size, size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
	return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
	 different block with weaker alignment.  If so, this block is too
	 short to contain SIZE after alignment correction.  So we must
	 try again and get another block, slightly larger.  */
    } while (adj > lastadj);
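
  /* Worked example (editorial): for alignment == 16, suppose the first
     malloc returns 0x1008.  Then adj == 8, so the block is freed and
     8 + size bytes are requested; if malloc hands back 0x1008 again,
     adj stays 8 (not greater than lastadj) and the loop exits.  Below,
     the caller is given 0x1008 + 16 - 8 == 0x1010, a 16-byte-aligned
     address with the full SIZE bytes behind it.  */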

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
	 can identify the pointer it is passed, which will be in the middle
	 of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot is free.  Use it.  */
	  break;
      if (l == NULL)
	{
	  l = (struct alignlist *) malloc (sizeof (struct alignlist));
	  if (l == NULL)
	    {
	      free (result);
	      return NULL;
	    }
	  l->next = _aligned_blocks;
	  _aligned_blocks = l;
	}
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
extern size_t __getpagesize __P ((void));
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}

#endif /* Not ELIDE_VALLOC.  */