annotate src/gmalloc.c @ 26749:5bc04426fb75 (Mercurial repository: emacs)
Include <syms.h>, not <sym.h> on IRIX. Removed
duplicate definition of ElfW.
(find_section): Copied from unexsgi.c.
(unexec): Use find_section. Adjust whitespace. Initialize
new_data2_offset based on old_data, not sbss (this fixes a bug on
IRIX6). Change #ifdef __mips to __sgi, since it's IRIX-specific.
Adjust test for presence of .mdebug section to the new return
value of find_section.
Merge changes from 20.5.
(unexec): Handle .lit4 and .lit8 unconditionally.
author | Gerd Moellmann <gerd@gnu.org> |
---|---|
date | Tue, 07 Dec 1999 09:50:01 +0000 |
parents | b7438760079b |
children | f9aeac6780a1 |
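The log entry above refers to a find_section helper in unexelf.c (copied from unexsgi.c) whose return value is now used to test whether a section such as .mdebug is present. Purely as an illustrative sketch of that kind of lookup, and not the actual unexelf.c code (which uses the ElfW macro to stay word-size neutral; plain 32-bit ELF types from <elf.h> are assumed here), such a search over the section header table might look like this:

    #include <elf.h>
    #include <string.h>

    /* Return the index of the section header named NAME, or -1 if the
       section is absent, so callers can test for presence directly.
       FILE_BASE points at the start of the mapped executable image.  */
    static int
    find_section (const char *name, const char *file_base)
    {
      const Elf32_Ehdr *ehdr = (const Elf32_Ehdr *) file_base;
      const Elf32_Shdr *shdr = (const Elf32_Shdr *) (file_base + ehdr->e_shoff);
      const char *shstrtab = file_base + shdr[ehdr->e_shstrndx].sh_offset;
      int i;

      /* Index 0 is the reserved null section, so start at 1.  */
      for (i = 1; i < ehdr->e_shnum; i++)
        if (strcmp (shstrtab + shdr[i].sh_name, name) == 0)
          return i;
      return -1;
    }

A caller can then write presence tests in the style of if (find_section (".mdebug", old_base) != -1), which is the kind of check the entry says was adjusted to the new return value; old_base here is a hypothetical pointer to the old executable's image, not a name from the Emacs sources.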
rev | line source |
---|---|
17130 | 1 /* This file is no longer automatically generated from libc. */ |
2 | |
3 #define _MALLOC_INTERNAL | |
4 | |
5 /* The malloc headers and source files from the C library follow here. */ | |
6 | |
7 /* Declarations for `malloc' and friends. | |
26088 |
8 Copyright 1990, 91, 92, 93, 95, 96, 99 Free Software Foundation, Inc. |
17130 | 9 Written May 1989 by Mike Haertel. |
10 | |
11 This library is free software; you can redistribute it and/or | |
12 modify it under the terms of the GNU Library General Public License as | |
13 published by the Free Software Foundation; either version 2 of the | |
14 License, or (at your option) any later version. | |
15 | |
16 This library is distributed in the hope that it will be useful, | |
17 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
19 Library General Public License for more details. | |
20 | |
21 You should have received a copy of the GNU Library General Public | |
22 License along with this library; see the file COPYING.LIB. If | |
23 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
24 Cambridge, MA 02139, USA. | |
25 | |
26 The author may be reached (Email) at the address mike@ai.mit.edu, | |
27 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
28 | |
29 #ifndef _MALLOC_H | |
30 | |
31 #define _MALLOC_H 1 | |
32 | |
33 #ifdef _MALLOC_INTERNAL | |
34 | |
35 #ifdef HAVE_CONFIG_H | |
36 #include <config.h> | |
37 #endif | |
38 | |
26526 |
39 #if defined __cplusplus || (defined (__STDC__) && __STDC__) || defined STDC_HEADERS |
18667 |
40 #undef PP |
18667 |
41 #define PP(args) args |
17130 | 42 #undef __ptr_t |
43 #define __ptr_t void * | |
44 #else /* Not C++ or ANSI C. */ | |
18667 |
45 #undef PP |
18667 |
46 #define PP(args) () |
17130 | 47 #undef __ptr_t |
48 #define __ptr_t char * | |
49 #endif /* C++ or ANSI C. */ | |
50 | |
51 #if defined(_LIBC) || defined(STDC_HEADERS) || defined(USG) | |
52 #include <string.h> | |
53 #else | |
54 #ifndef memset | |
55 #define memset(s, zero, n) bzero ((s), (n)) | |
56 #endif | |
57 #ifndef memcpy | |
58 #define memcpy(d, s, n) bcopy ((s), (d), (n)) | |
59 #endif | |
60 #endif | |
61 | |
26526 |
62 #ifdef HAVE_LIMITS_H |
17130 | 63 #include <limits.h> |
26526 |
64 #endif |
17130 | 65 #ifndef CHAR_BIT |
66 #define CHAR_BIT 8 | |
67 #endif | |
68 | |
69 #ifdef HAVE_UNISTD_H | |
70 #include <unistd.h> | |
71 #endif | |
72 | |
73 #endif /* _MALLOC_INTERNAL. */ | |
74 | |
75 | |
76 #ifdef __cplusplus | |
77 extern "C" | |
78 { | |
79 #endif | |
80 | |
26526 |
81 #ifdef STDC_HEADERS |
17130 | 82 #include <stddef.h> |
83 #define __malloc_size_t size_t | |
84 #define __malloc_ptrdiff_t ptrdiff_t | |
85 #else | |
86 #define __malloc_size_t unsigned int | |
87 #define __malloc_ptrdiff_t int | |
88 #endif | |
89 | |
90 #ifndef NULL | |
91 #define NULL 0 | |
92 #endif | |
93 | |
26526 |
94 #ifndef FREE_RETURN_TYPE |
26526 |
95 #define FREE_RETURN_TYPE void |
26526 |
96 #endif |
26526 |
97 |
17130 | 98 |
99 /* Allocate SIZE bytes of memory. */ | |
18667 |
100 extern __ptr_t malloc PP ((__malloc_size_t __size)); |
17130 | 101 /* Re-allocate the previously allocated block |
102 in __ptr_t, making the new block SIZE bytes long. */ | |
18667 |
103 extern __ptr_t realloc PP ((__ptr_t __ptr, __malloc_size_t __size)); |
17130 | 104 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0. */ |
18667 |
105 extern __ptr_t calloc PP ((__malloc_size_t __nmemb, __malloc_size_t __size)); |
17130 | 106 /* Free a block allocated by `malloc', `realloc' or `calloc'. */ |
26526 |
107 extern FREE_RETURN_TYPE free PP ((__ptr_t __ptr)); |
17130 | 108 |
109 /* Allocate SIZE bytes allocated to ALIGNMENT bytes. */ | |
110 #if ! (defined (_MALLOC_INTERNAL) && __DJGPP__ - 0 == 1) /* Avoid conflict. */ | |
18667 |
111 extern __ptr_t memalign PP ((__malloc_size_t __alignment, |
18667 |
112 __malloc_size_t __size)); |
17130 | 113 #endif |
114 | |
115 /* Allocate SIZE bytes on a page boundary. */ | |
116 #if ! (defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)) | |
18667 |
117 extern __ptr_t valloc PP ((__malloc_size_t __size)); |
17130 | 118 #endif |
119 | |
120 | |
121 #ifdef _MALLOC_INTERNAL | |
122 | |
123 /* The allocator divides the heap into blocks of fixed size; large | |
124 requests receive one or more whole blocks, and small requests | |
125 receive a fragment of a block. Fragment sizes are powers of two, | |
126 and all fragments of a block are the same size. When all the | |
127 fragments in a block have been freed, the block itself is freed. */ | |
128 #define INT_BIT (CHAR_BIT * sizeof(int)) | |
129 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9) | |
130 #define BLOCKSIZE (1 << BLOCKLOG) | |
131 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE) | |
132 | |
133 /* Determine the amount of memory spanned by the initial heap table | |
134 (not an absolute limit). */ | |
135 #define HEAP (INT_BIT > 16 ? 4194304 : 65536) | |
136 | |
137 /* Number of contiguous free blocks allowed to build up at the end of | |
138 memory before they will be returned to the system. */ | |
139 #define FINAL_FREE_BLOCKS 8 | |
140 | |
141 /* Data structure giving per-block information. */ | |
142 typedef union | |
143 { | |
144 /* Heap information for a busy block. */ | |
145 struct | |
146 { | |
147 /* Zero for a large (multiblock) object, or positive giving the | |
148 logarithm to the base two of the fragment size. */ | |
149 int type; | |
150 union | |
151 { | |
152 struct | |
153 { | |
154 __malloc_size_t nfree; /* Free frags in a fragmented block. */ | |
155 __malloc_size_t first; /* First free fragment of the block. */ | |
156 } frag; | |
157 /* For a large object, in its first block, this has the number | |
158 of blocks in the object. In the other blocks, this has a | |
159 negative number which says how far back the first block is. */ | |
160 __malloc_ptrdiff_t size; | |
161 } info; | |
162 } busy; | |
163 /* Heap information for a free block | |
164 (that may be the first of a free cluster). */ | |
165 struct | |
166 { | |
167 __malloc_size_t size; /* Size (in blocks) of a free cluster. */ | |
168 __malloc_size_t next; /* Index of next free cluster. */ | |
169 __malloc_size_t prev; /* Index of previous free cluster. */ | |
170 } free; | |
171 } malloc_info; | |
172 | |
173 /* Pointer to first block of the heap. */ | |
174 extern char *_heapbase; | |
175 | |
176 /* Table indexed by block number giving per-block information. */ | |
177 extern malloc_info *_heapinfo; | |
178 | |
179 /* Address to block number and vice versa. */ | |
180 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1) | |
181 #define ADDRESS(B) ((__ptr_t) (((B) - 1) * BLOCKSIZE + _heapbase)) | |
182 | |
183 /* Current search index for the heap table. */ | |
184 extern __malloc_size_t _heapindex; | |
185 | |
186 /* Limit of valid info table indices. */ | |
187 extern __malloc_size_t _heaplimit; | |
188 | |
189 /* Doubly linked lists of free fragments. */ | |
190 struct list | |
191 { | |
192 struct list *next; | |
193 struct list *prev; | |
194 }; | |
195 | |
196 /* Free list headers for each fragment size. */ | |
197 extern struct list _fraghead[]; | |
198 | |
199 /* List of blocks allocated with `memalign' (or `valloc'). */ | |
200 struct alignlist | |
201 { | |
202 struct alignlist *next; | |
203 __ptr_t aligned; /* The address that memaligned returned. */ | |
204 __ptr_t exact; /* The address that malloc returned. */ | |
205 }; | |
206 extern struct alignlist *_aligned_blocks; | |
207 | |
208 /* Instrumentation. */ | |
209 extern __malloc_size_t _chunks_used; | |
210 extern __malloc_size_t _bytes_used; | |
211 extern __malloc_size_t _chunks_free; | |
212 extern __malloc_size_t _bytes_free; | |
213 | |
214 /* Internal versions of `malloc', `realloc', and `free' | |
215 used when these functions need to call each other. | |
216 They are the same but don't call the hooks. */ | |
18667 |
217 extern __ptr_t _malloc_internal PP ((__malloc_size_t __size)); |
18667 |
218 extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size)); |
18667 |
219 extern void _free_internal PP ((__ptr_t __ptr)); |
17130 | 220 |
221 #endif /* _MALLOC_INTERNAL. */ | |
222 | |
223 /* Given an address in the middle of a malloc'd object, | |
224 return the address of the beginning of the object. */ | |
18667 |
225 extern __ptr_t malloc_find_object_address PP ((__ptr_t __ptr)); |
17130 | 226 |
227 /* Underlying allocation function; successive calls should | |
228 return contiguous pieces of memory. */ | |
18667 |
229 extern __ptr_t (*__morecore) PP ((__malloc_ptrdiff_t __size)); |
17130 | 230 |
231 /* Default value of `__morecore'. */ | |
18667 |
232 extern __ptr_t __default_morecore PP ((__malloc_ptrdiff_t __size)); |
17130 | 233 |
234 /* If not NULL, this function is called after each time | |
235 `__morecore' is called to increase the data size. */ | |
18667 |
236 extern void (*__after_morecore_hook) PP ((void)); |
17130 | 237 |
238 /* Number of extra blocks to get each time we ask for more core. | |
239 This reduces the frequency of calling `(*__morecore)'. */ | |
240 extern __malloc_size_t __malloc_extra_blocks; | |
241 | |
242 /* Nonzero if `malloc' has been called and done its initialization. */ | |
243 extern int __malloc_initialized; | |
244 /* Function called to initialize malloc data structures. */ | |
18667 |
245 extern int __malloc_initialize PP ((void)); |
17130 | 246 |
247 /* Hooks for debugging versions. */ | |
18667 |
248 extern void (*__malloc_initialize_hook) PP ((void)); |
18667 |
249 extern void (*__free_hook) PP ((__ptr_t __ptr)); |
18667 |
250 extern __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size)); |
18667 |
251 extern __ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size)); |
18667 |
252 extern __ptr_t (*__memalign_hook) PP ((__malloc_size_t __size, |
18667 |
253 __malloc_size_t __alignment)); |
17130 | 254 |
255 /* Return values for `mprobe': these are the kinds of inconsistencies that | |
256 `mcheck' enables detection of. */ | |
257 enum mcheck_status | |
258 { | |
259 MCHECK_DISABLED = -1, /* Consistency checking is not turned on. */ | |
260 MCHECK_OK, /* Block is fine. */ | |
261 MCHECK_FREE, /* Block freed twice. */ | |
262 MCHECK_HEAD, /* Memory before the block was clobbered. */ | |
263 MCHECK_TAIL /* Memory after the block was clobbered. */ | |
264 }; | |
265 | |
266 /* Activate a standard collection of debugging hooks. This must be called | |
267 before `malloc' is ever called. ABORTFUNC is called with an error code | |
268 (see enum above) when an inconsistency is detected. If ABORTFUNC is | |
269 null, the standard function prints on stderr and then calls `abort'. */ | |
18667 |
270 extern int mcheck PP ((void (*__abortfunc) PP ((enum mcheck_status)))); |
17130 | 271 |
272 /* Check for aberrations in a particular malloc'd block. You must have | |
273 called `mcheck' already. These are the same checks that `mcheck' does | |
274 when you free or reallocate a block. */ | |
18667 |
275 extern enum mcheck_status mprobe PP ((__ptr_t __ptr)); |
17130 | 276 |
277 /* Activate a standard collection of tracing hooks. */ | |
18667 |
278 extern void mtrace PP ((void)); |
18667 |
279 extern void muntrace PP ((void)); |
17130 | 280 |
281 /* Statistics available to the user. */ | |
282 struct mstats | |
283 { | |
284 __malloc_size_t bytes_total; /* Total size of the heap. */ | |
285 __malloc_size_t chunks_used; /* Chunks allocated by the user. */ | |
286 __malloc_size_t bytes_used; /* Byte total of user-allocated chunks. */ | |
287 __malloc_size_t chunks_free; /* Chunks in the free list. */ | |
288 __malloc_size_t bytes_free; /* Byte total of chunks in the free list. */ | |
289 }; | |
290 | |
291 /* Pick up the current statistics. */ | |
18667 |
292 extern struct mstats mstats PP ((void)); |
17130 | 293 |
294 /* Call WARNFUN with a warning message when memory usage is high. */ | |
18667 |
295 extern void memory_warnings PP ((__ptr_t __start, |
18667 |
296 void (*__warnfun) PP ((const char *)))); |
17130 | 297 |
298 | |
299 /* Relocating allocator. */ | |
300 | |
301 /* Allocate SIZE bytes, and store the address in *HANDLEPTR. */ | |
18667 |
302 extern __ptr_t r_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size)); |
17130 | 303 |
304 /* Free the storage allocated in HANDLEPTR. */ | |
18667 |
305 extern void r_alloc_free PP ((__ptr_t *__handleptr)); |
17130 | 306 |
307 /* Adjust the block at HANDLEPTR to be SIZE bytes long. */ | |
18667 |
308 extern __ptr_t r_re_alloc PP ((__ptr_t *__handleptr, __malloc_size_t __size)); |
17130 | 309 |
310 | |
311 #ifdef __cplusplus | |
312 } | |
313 #endif | |
314 | |
315 #endif /* malloc.h */ | |
316 /* Memory allocator `malloc'. | |
317 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
318 Written May 1989 by Mike Haertel. | |
319 | |
320 This library is free software; you can redistribute it and/or | |
321 modify it under the terms of the GNU Library General Public License as | |
322 published by the Free Software Foundation; either version 2 of the | |
323 License, or (at your option) any later version. | |
324 | |
325 This library is distributed in the hope that it will be useful, | |
326 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
327 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
328 Library General Public License for more details. | |
329 | |
330 You should have received a copy of the GNU Library General Public | |
331 License along with this library; see the file COPYING.LIB. If | |
332 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
333 Cambridge, MA 02139, USA. | |
334 | |
335 The author may be reached (Email) at the address mike@ai.mit.edu, | |
336 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
337 | |
338 #ifndef _MALLOC_INTERNAL | |
339 #define _MALLOC_INTERNAL | |
340 #include <malloc.h> | |
341 #endif | |
342 #include <errno.h> | |
343 | |
344 /* How to really get more memory. */ | |
18667 |
345 __ptr_t (*__morecore) PP ((ptrdiff_t __size)) = __default_morecore; |
17130 | 346 |
347 /* Debugging hook for `malloc'. */ | |
18667 |
348 __ptr_t (*__malloc_hook) PP ((__malloc_size_t __size)); |
17130 | 349 |
350 /* Pointer to the base of the first block. */ | |
351 char *_heapbase; | |
352 | |
353 /* Block information table. Allocated with align/__free (not malloc/free). */ | |
354 malloc_info *_heapinfo; | |
355 | |
356 /* Number of info entries. */ | |
357 static __malloc_size_t heapsize; | |
358 | |
359 /* Search index in the info table. */ | |
360 __malloc_size_t _heapindex; | |
361 | |
362 /* Limit of valid info table indices. */ | |
363 __malloc_size_t _heaplimit; | |
364 | |
365 /* Free lists for each fragment size. */ | |
366 struct list _fraghead[BLOCKLOG]; | |
367 | |
368 /* Instrumentation. */ | |
369 __malloc_size_t _chunks_used; | |
370 __malloc_size_t _bytes_used; | |
371 __malloc_size_t _chunks_free; | |
372 __malloc_size_t _bytes_free; | |
373 | |
374 /* Are you experienced? */ | |
375 int __malloc_initialized; | |
376 | |
377 __malloc_size_t __malloc_extra_blocks; | |
378 | |
18667 |
379 void (*__malloc_initialize_hook) PP ((void)); |
18667 |
380 void (*__after_morecore_hook) PP ((void)); |
17130 | 381 |
382 | |
383 /* Aligned allocation. */ | |
18667 |
384 static __ptr_t align PP ((__malloc_size_t)); |
17130 | 385 static __ptr_t |
386 align (size) | |
387 __malloc_size_t size; | |
388 { | |
389 __ptr_t result; | |
390 unsigned long int adj; | |
391 | |
392 result = (*__morecore) (size); | |
393 adj = (unsigned long int) ((unsigned long int) ((char *) result - | |
394 (char *) NULL)) % BLOCKSIZE; | |
395 if (adj != 0) | |
396 { | |
397 __ptr_t new; | |
398 adj = BLOCKSIZE - adj; | |
399 new = (*__morecore) (adj); | |
400 result = (char *) result + adj; | |
401 } | |
402 | |
403 if (__after_morecore_hook) | |
404 (*__after_morecore_hook) (); | |
405 | |
406 return result; | |
407 } | |
408 | |
409 /* Get SIZE bytes, if we can get them starting at END. | |
410 Return the address of the space we got. | |
411 If we cannot get space at END, fail and return 0. */ | |
18667 |
412 static __ptr_t get_contiguous_space PP ((__malloc_ptrdiff_t, __ptr_t)); |
17130 | 413 static __ptr_t |
414 get_contiguous_space (size, position) | |
415 __malloc_ptrdiff_t size; | |
416 __ptr_t position; | |
417 { | |
418 __ptr_t before; | |
419 __ptr_t after; | |
420 | |
421 before = (*__morecore) (0); | |
422 /* If we can tell in advance that the break is at the wrong place, | |
423 fail now. */ | |
424 if (before != position) | |
425 return 0; | |
426 | |
427 /* Allocate SIZE bytes and get the address of them. */ | |
428 after = (*__morecore) (size); | |
429 if (!after) | |
430 return 0; | |
431 | |
432 /* It was not contiguous--reject it. */ | |
433 if (after != position) | |
434 { | |
435 (*__morecore) (- size); | |
436 return 0; | |
437 } | |
438 | |
439 return after; | |
440 } | |
441 | |
442 | |
443 /* This is called when `_heapinfo' and `heapsize' have just | |
444 been set to describe a new info table. Set up the table | |
445 to describe itself and account for it in the statistics. */ | |
18667 |
446 static void register_heapinfo PP ((void)); |
17130 | 447 #ifdef __GNUC__ |
448 __inline__ | |
449 #endif | |
450 static void | |
451 register_heapinfo () | |
452 { | |
453 __malloc_size_t block, blocks; | |
454 | |
455 block = BLOCK (_heapinfo); | |
456 blocks = BLOCKIFY (heapsize * sizeof (malloc_info)); | |
457 | |
458 /* Account for the _heapinfo block itself in the statistics. */ | |
459 _bytes_used += blocks * BLOCKSIZE; | |
460 ++_chunks_used; | |
461 | |
462 /* Describe the heapinfo block itself in the heapinfo. */ | |
463 _heapinfo[block].busy.type = 0; | |
464 _heapinfo[block].busy.info.size = blocks; | |
465 /* Leave back-pointers for malloc_find_address. */ | |
466 while (--blocks > 0) | |
467 _heapinfo[block + blocks].busy.info.size = -blocks; | |
468 } | |
469 | |
470 /* Set everything up and remember that we have. */ | |
471 int | |
472 __malloc_initialize () | |
473 { | |
474 if (__malloc_initialized) | |
475 return 0; | |
476 | |
477 if (__malloc_initialize_hook) | |
478 (*__malloc_initialize_hook) (); | |
479 | |
480 heapsize = HEAP / BLOCKSIZE; | |
481 _heapinfo = (malloc_info *) align (heapsize * sizeof (malloc_info)); | |
482 if (_heapinfo == NULL) | |
483 return 0; | |
484 memset (_heapinfo, 0, heapsize * sizeof (malloc_info)); | |
485 _heapinfo[0].free.size = 0; | |
486 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0; | |
487 _heapindex = 0; | |
488 _heapbase = (char *) _heapinfo; | |
489 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info)); | |
490 | |
491 register_heapinfo (); | |
492 | |
493 __malloc_initialized = 1; | |
494 return 1; | |
495 } | |
496 | |
497 static int morecore_recursing; | |
498 | |
499 /* Get neatly aligned memory, initializing or | |
500 growing the heap info table as necessary. */ | |
18667 |
501 static __ptr_t morecore PP ((__malloc_size_t)); |
17130 | 502 static __ptr_t |
503 morecore (size) | |
504 __malloc_size_t size; | |
505 { | |
506 __ptr_t result; | |
507 malloc_info *newinfo, *oldinfo; | |
508 __malloc_size_t newsize; | |
509 | |
510 if (morecore_recursing) | |
511 /* Avoid recursion. The caller will know how to handle a null return. */ | |
512 return NULL; | |
513 | |
514 result = align (size); | |
515 if (result == NULL) | |
516 return NULL; | |
517 | |
518 /* Check if we need to grow the info table. */ | |
519 if ((__malloc_size_t) BLOCK ((char *) result + size) > heapsize) | |
520 { | |
521 /* Calculate the new _heapinfo table size. We do not account for the | |
522 added blocks in the table itself, as we hope to place them in | |
523 existing free space, which is already covered by part of the | |
524 existing table. */ | |
525 newsize = heapsize; | |
526 do | |
527 newsize *= 2; | |
528 while ((__malloc_size_t) BLOCK ((char *) result + size) > newsize); | |
529 | |
530 /* We must not reuse existing core for the new info table when called | |
531 from realloc in the case of growing a large block, because the | |
532 block being grown is momentarily marked as free. In this case | |
533 _heaplimit is zero so we know not to reuse space for internal | |
534 allocation. */ | |
535 if (_heaplimit != 0) | |
536 { | |
537 /* First try to allocate the new info table in core we already | |
538 have, in the usual way using realloc. If realloc cannot | |
539 extend it in place or relocate it to existing sufficient core, | |
540 we will get called again, and the code above will notice the | |
541 `morecore_recursing' flag and return null. */ | |
542 int save = errno; /* Don't want to clobber errno with ENOMEM. */ | |
543 morecore_recursing = 1; | |
544 newinfo = (malloc_info *) _realloc_internal | |
545 (_heapinfo, newsize * sizeof (malloc_info)); | |
546 morecore_recursing = 0; | |
547 if (newinfo == NULL) | |
548 errno = save; | |
549 else | |
550 { | |
551 /* We found some space in core, and realloc has put the old | |
552 table's blocks on the free list. Now zero the new part | |
553 of the table and install the new table location. */ | |
554 memset (&newinfo[heapsize], 0, | |
555 (newsize - heapsize) * sizeof (malloc_info)); | |
556 _heapinfo = newinfo; | |
557 heapsize = newsize; | |
558 goto got_heap; | |
559 } | |
560 } | |
561 | |
562 /* Allocate new space for the malloc info table. */ | |
563 while (1) | |
564 { | |
565 newinfo = (malloc_info *) align (newsize * sizeof (malloc_info)); | |
566 | |
567 /* Did it fail? */ | |
568 if (newinfo == NULL) | |
569 { | |
570 (*__morecore) (-size); | |
571 return NULL; | |
572 } | |
573 | |
574 /* Is it big enough to record status for its own space? | |
575 If so, we win. */ | |
576 if ((__malloc_size_t) BLOCK ((char *) newinfo | |
577 + newsize * sizeof (malloc_info)) | |
578 < newsize) | |
579 break; | |
580 | |
581 /* Must try again. First give back most of what we just got. */ | |
582 (*__morecore) (- newsize * sizeof (malloc_info)); | |
583 newsize *= 2; | |
584 } | |
585 | |
586 /* Copy the old table to the beginning of the new, | |
587 and zero the rest of the new table. */ | |
588 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info)); | |
589 memset (&newinfo[heapsize], 0, | |
590 (newsize - heapsize) * sizeof (malloc_info)); | |
591 oldinfo = _heapinfo; | |
592 _heapinfo = newinfo; | |
593 heapsize = newsize; | |
594 | |
595 register_heapinfo (); | |
596 | |
597 /* Reset _heaplimit so _free_internal never decides | |
598 it can relocate or resize the info table. */ | |
599 _heaplimit = 0; | |
600 _free_internal (oldinfo); | |
601 | |
602 /* The new heap limit includes the new table just allocated. */ | |
603 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info)); | |
604 return result; | |
605 } | |
606 | |
607 got_heap: | |
608 _heaplimit = BLOCK ((char *) result + size); | |
609 return result; | |
610 } | |
611 | |
612 /* Allocate memory from the heap. */ | |
613 __ptr_t | |
614 _malloc_internal (size) | |
615 __malloc_size_t size; | |
616 { | |
617 __ptr_t result; | |
618 __malloc_size_t block, blocks, lastblocks, start; | |
619 register __malloc_size_t i; | |
620 struct list *next; | |
621 | |
622 /* ANSI C allows `malloc (0)' to either return NULL, or to return a | |
623 valid address you can realloc and free (though not dereference). | |
624 | |
625 It turns out that some extant code (sunrpc, at least Ultrix's version) | |
626 expects `malloc (0)' to return non-NULL and breaks otherwise. | |
627 Be compatible. */ | |
628 | |
629 #if 0 | |
630 if (size == 0) | |
631 return NULL; | |
632 #endif | |
633 | |
634 if (size < sizeof (struct list)) | |
635 size = sizeof (struct list); | |
636 | |
637 #ifdef SUNOS_LOCALTIME_BUG | |
638 if (size < 16) | |
639 size = 16; | |
640 #endif | |
641 | |
642 /* Determine the allocation policy based on the request size. */ | |
643 if (size <= BLOCKSIZE / 2) | |
644 { | |
645 /* Small allocation to receive a fragment of a block. | |
646 Determine the logarithm to base two of the fragment size. */ | |
647 register __malloc_size_t log = 1; | |
648 --size; | |
649 while ((size /= 2) != 0) | |
650 ++log; | |
651 | |
652 /* Look in the fragment lists for a | |
653 free fragment of the desired size. */ | |
654 next = _fraghead[log].next; | |
655 if (next != NULL) | |
656 { | |
657 /* There are free fragments of this size. | |
658 Pop a fragment out of the fragment list and return it. | |
659 Update the block's nfree and first counters. */ | |
660 result = (__ptr_t) next; | |
661 next->prev->next = next->next; | |
662 if (next->next != NULL) | |
663 next->next->prev = next->prev; | |
664 block = BLOCK (result); | |
665 if (--_heapinfo[block].busy.info.frag.nfree != 0) | |
666 _heapinfo[block].busy.info.frag.first = (unsigned long int) | |
667 ((unsigned long int) ((char *) next->next - (char *) NULL) | |
668 % BLOCKSIZE) >> log; | |
669 | |
670 /* Update the statistics. */ | |
671 ++_chunks_used; | |
672 _bytes_used += 1 << log; | |
673 --_chunks_free; | |
674 _bytes_free -= 1 << log; | |
675 } | |
676 else | |
677 { | |
678 /* No free fragments of the desired size, so get a new block | |
679 and break it into fragments, returning the first. */ | |
680 result = malloc (BLOCKSIZE); | |
681 if (result == NULL) | |
682 return NULL; | |
683 | |
684 /* Link all fragments but the first into the free list. */ | |
685 next = (struct list *) ((char *) result + (1 << log)); | |
686 next->next = NULL; | |
687 next->prev = &_fraghead[log]; | |
688 _fraghead[log].next = next; | |
689 | |
690 for (i = 2; i < (__malloc_size_t) (BLOCKSIZE >> log); ++i) | |
691 { | |
692 next = (struct list *) ((char *) result + (i << log)); | |
693 next->next = _fraghead[log].next; | |
694 next->prev = &_fraghead[log]; | |
695 next->prev->next = next; | |
696 next->next->prev = next; | |
697 } | |
698 | |
699 /* Initialize the nfree and first counters for this block. */ | |
700 block = BLOCK (result); | |
701 _heapinfo[block].busy.type = log; | |
702 _heapinfo[block].busy.info.frag.nfree = i - 1; | |
703 _heapinfo[block].busy.info.frag.first = i - 1; | |
704 | |
705 _chunks_free += (BLOCKSIZE >> log) - 1; | |
706 _bytes_free += BLOCKSIZE - (1 << log); | |
707 _bytes_used -= BLOCKSIZE - (1 << log); | |
708 } | |
709 } | |
710 else | |
711 { | |
712 /* Large allocation to receive one or more blocks. | |
713 Search the free list in a circle starting at the last place visited. | |
714 If we loop completely around without finding a large enough | |
715 space we will have to get more memory from the system. */ | |
716 blocks = BLOCKIFY (size); | |
717 start = block = _heapindex; | |
718 while (_heapinfo[block].free.size < blocks) | |
719 { | |
720 block = _heapinfo[block].free.next; | |
721 if (block == start) | |
722 { | |
723 /* Need to get more from the system. Get a little extra. */ | |
724 __malloc_size_t wantblocks = blocks + __malloc_extra_blocks; | |
725 block = _heapinfo[0].free.prev; | |
726 lastblocks = _heapinfo[block].free.size; | |
727 /* Check to see if the new core will be contiguous with the | |
728 final free block; if so we don't need to get as much. */ | |
729 if (_heaplimit != 0 && block + lastblocks == _heaplimit && | |
730 /* We can't do this if we will have to make the heap info | |
731 table bigger to accomodate the new space. */ | |
732 block + wantblocks <= heapsize && | |
733 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE, | |
734 ADDRESS (block + lastblocks))) | |
735 { | |
736 /* We got it contiguously. Which block we are extending | |
737 (the `final free block' referred to above) might have | |
738 changed, if it got combined with a freed info table. */ | |
739 block = _heapinfo[0].free.prev; | |
740 _heapinfo[block].free.size += (wantblocks - lastblocks); | |
741 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE; | |
742 _heaplimit += wantblocks - lastblocks; | |
743 continue; | |
744 } | |
745 result = morecore (wantblocks * BLOCKSIZE); | |
746 if (result == NULL) | |
747 return NULL; | |
748 block = BLOCK (result); | |
749 /* Put the new block at the end of the free list. */ | |
750 _heapinfo[block].free.size = wantblocks; | |
751 _heapinfo[block].free.prev = _heapinfo[0].free.prev; | |
752 _heapinfo[block].free.next = 0; | |
753 _heapinfo[0].free.prev = block; | |
754 _heapinfo[_heapinfo[block].free.prev].free.next = block; | |
755 ++_chunks_free; | |
756 /* Now loop to use some of that block for this allocation. */ | |
757 } | |
758 } | |
759 | |
760 /* At this point we have found a suitable free list entry. | |
761 Figure out how to remove what we need from the list. */ | |
762 result = ADDRESS (block); | |
763 if (_heapinfo[block].free.size > blocks) | |
764 { | |
765 /* The block we found has a bit left over, | |
766 so relink the tail end back into the free list. */ | |
767 _heapinfo[block + blocks].free.size | |
768 = _heapinfo[block].free.size - blocks; | |
769 _heapinfo[block + blocks].free.next | |
770 = _heapinfo[block].free.next; | |
771 _heapinfo[block + blocks].free.prev | |
772 = _heapinfo[block].free.prev; | |
773 _heapinfo[_heapinfo[block].free.prev].free.next | |
774 = _heapinfo[_heapinfo[block].free.next].free.prev | |
775 = _heapindex = block + blocks; | |
776 } | |
777 else | |
778 { | |
779 /* The block exactly matches our requirements, | |
780 so just remove it from the list. */ | |
781 _heapinfo[_heapinfo[block].free.next].free.prev | |
782 = _heapinfo[block].free.prev; | |
783 _heapinfo[_heapinfo[block].free.prev].free.next | |
784 = _heapindex = _heapinfo[block].free.next; | |
785 --_chunks_free; | |
786 } | |
787 | |
788 _heapinfo[block].busy.type = 0; | |
789 _heapinfo[block].busy.info.size = blocks; | |
790 ++_chunks_used; | |
791 _bytes_used += blocks * BLOCKSIZE; | |
792 _bytes_free -= blocks * BLOCKSIZE; | |
793 | |
794 /* Mark all the blocks of the object just allocated except for the | |
795 first with a negative number so you can find the first block by | |
796 adding that adjustment. */ | |
797 while (--blocks > 0) | |
798 _heapinfo[block + blocks].busy.info.size = -blocks; | |
799 } | |
800 | |
801 return result; | |
802 } | |
803 | |
804 __ptr_t | |
805 malloc (size) | |
806 __malloc_size_t size; | |
807 { | |
808 if (!__malloc_initialized && !__malloc_initialize ()) | |
809 return NULL; | |
810 | |
811 return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size); | |
812 } | |
813 | |
814 #ifndef _LIBC | |
815 | |
816 /* On some ANSI C systems, some libc functions call _malloc, _free | |
817 and _realloc. Make them use the GNU functions. */ | |
818 | |
819 __ptr_t | |
820 _malloc (size) | |
821 __malloc_size_t size; | |
822 { | |
823 return malloc (size); | |
824 } | |
825 | |
826 void | |
827 _free (ptr) | |
828 __ptr_t ptr; | |
829 { | |
830 free (ptr); | |
831 } | |
832 | |
833 __ptr_t | |
834 _realloc (ptr, size) | |
835 __ptr_t ptr; | |
836 __malloc_size_t size; | |
837 { | |
838 return realloc (ptr, size); | |
839 } | |
840 | |
841 #endif | |
842 /* Free a block of memory allocated by `malloc'. | |
843 Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc. | |
844 Written May 1989 by Mike Haertel. | |
845 | |
846 This library is free software; you can redistribute it and/or | |
847 modify it under the terms of the GNU Library General Public License as | |
848 published by the Free Software Foundation; either version 2 of the | |
849 License, or (at your option) any later version. | |
850 | |
851 This library is distributed in the hope that it will be useful, | |
852 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
853 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
854 Library General Public License for more details. | |
855 | |
856 You should have received a copy of the GNU Library General Public | |
857 License along with this library; see the file COPYING.LIB. If | |
858 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
859 Cambridge, MA 02139, USA. | |
860 | |
861 The author may be reached (Email) at the address mike@ai.mit.edu, | |
862 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
863 | |
864 #ifndef _MALLOC_INTERNAL | |
865 #define _MALLOC_INTERNAL | |
866 #include <malloc.h> | |
867 #endif | |
868 | |
869 | |
870 /* Cope with systems lacking `memmove'. */ | |
871 #ifndef memmove | |
872 #if (defined (MEMMOVE_MISSING) || \ | |
873 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG)) | |
874 #ifdef emacs | |
875 #undef __malloc_safe_bcopy | |
876 #define __malloc_safe_bcopy safe_bcopy | |
877 #endif | |
878 /* This function is defined in realloc.c. */ | |
18667 |
879 extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t)); |
17130 | 880 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size)) |
881 #endif | |
882 #endif | |
883 | |
884 | |
885 /* Debugging hook for free. */ | |
18667 |
886 void (*__free_hook) PP ((__ptr_t __ptr)); |
17130 | 887 |
888 /* List of blocks allocated by memalign. */ | |
889 struct alignlist *_aligned_blocks = NULL; | |
890 | |
891 /* Return memory to the heap. | |
892 Like `free' but don't call a __free_hook if there is one. */ | |
893 void | |
894 _free_internal (ptr) | |
895 __ptr_t ptr; | |
896 { | |
897 int type; | |
898 __malloc_size_t block, blocks; | |
899 register __malloc_size_t i; | |
900 struct list *prev, *next; | |
901 __ptr_t curbrk; | |
902 const __malloc_size_t lesscore_threshold | |
903 /* Threshold of free space at which we will return some to the system. */ | |
904 = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks; | |
905 | |
906 register struct alignlist *l; | |
907 | |
908 if (ptr == NULL) | |
909 return; | |
910 | |
911 for (l = _aligned_blocks; l != NULL; l = l->next) | |
912 if (l->aligned == ptr) | |
913 { | |
914 l->aligned = NULL; /* Mark the slot in the list as free. */ | |
915 ptr = l->exact; | |
916 break; | |
917 } | |
918 | |
919 block = BLOCK (ptr); | |
920 | |
921 type = _heapinfo[block].busy.type; | |
922 switch (type) | |
923 { | |
924 case 0: | |
925 /* Get as many statistics as early as we can. */ | |
926 --_chunks_used; | |
927 _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE; | |
928 _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE; | |
929 | |
930 /* Find the free cluster previous to this one in the free list. | |
931 Start searching at the last block referenced; this may benefit | |
932 programs with locality of allocation. */ | |
933 i = _heapindex; | |
934 if (i > block) | |
935 while (i > block) | |
936 i = _heapinfo[i].free.prev; | |
937 else | |
938 { | |
939 do | |
940 i = _heapinfo[i].free.next; | |
941 while (i > 0 && i < block); | |
942 i = _heapinfo[i].free.prev; | |
943 } | |
944 | |
945 /* Determine how to link this block into the free list. */ | |
946 if (block == i + _heapinfo[i].free.size) | |
947 { | |
948 /* Coalesce this block with its predecessor. */ | |
949 _heapinfo[i].free.size += _heapinfo[block].busy.info.size; | |
950 block = i; | |
951 } | |
952 else | |
953 { | |
954 /* Really link this block back into the free list. */ | |
955 _heapinfo[block].free.size = _heapinfo[block].busy.info.size; | |
956 _heapinfo[block].free.next = _heapinfo[i].free.next; | |
957 _heapinfo[block].free.prev = i; | |
958 _heapinfo[i].free.next = block; | |
959 _heapinfo[_heapinfo[block].free.next].free.prev = block; | |
960 ++_chunks_free; | |
961 } | |
962 | |
963 /* Now that the block is linked in, see if we can coalesce it | |
964 with its successor (by deleting its successor from the list | |
965 and adding in its size). */ | |
966 if (block + _heapinfo[block].free.size == _heapinfo[block].free.next) | |
967 { | |
968 _heapinfo[block].free.size | |
969 += _heapinfo[_heapinfo[block].free.next].free.size; | |
970 _heapinfo[block].free.next | |
971 = _heapinfo[_heapinfo[block].free.next].free.next; | |
972 _heapinfo[_heapinfo[block].free.next].free.prev = block; | |
973 --_chunks_free; | |
974 } | |
975 | |
976 /* How many trailing free blocks are there now? */ | |
977 blocks = _heapinfo[block].free.size; | |
978 | |
979 /* Where is the current end of accessible core? */ | |
980 curbrk = (*__morecore) (0); | |
981 | |
982 if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit)) | |
983 { | |
984 /* The end of the malloc heap is at the end of accessible core. | |
985 It's possible that moving _heapinfo will allow us to | |
986 return some space to the system. */ | |
987 | |
988 __malloc_size_t info_block = BLOCK (_heapinfo); | |
989 __malloc_size_t info_blocks = _heapinfo[info_block].busy.info.size; | |
990 __malloc_size_t prev_block = _heapinfo[block].free.prev; | |
991 __malloc_size_t prev_blocks = _heapinfo[prev_block].free.size; | |
992 __malloc_size_t next_block = _heapinfo[block].free.next; | |
993 __malloc_size_t next_blocks = _heapinfo[next_block].free.size; | |
994 | |
995 if (/* Win if this block being freed is last in core, the info table | |
996 is just before it, the previous free block is just before the | |
997 info table, and the two free blocks together form a useful | |
998 amount to return to the system. */ | |
999 (block + blocks == _heaplimit && | |
1000 info_block + info_blocks == block && | |
1001 prev_block != 0 && prev_block + prev_blocks == info_block && | |
1002 blocks + prev_blocks >= lesscore_threshold) || | |
1003 /* Nope, not the case. We can also win if this block being | |
1004 freed is just before the info table, and the table extends | |
1005 to the end of core or is followed only by a free block, | |
1006 and the total free space is worth returning to the system. */ | |
1007 (block + blocks == info_block && | |
1008 ((info_block + info_blocks == _heaplimit && | |
1009 blocks >= lesscore_threshold) || | |
1010 (info_block + info_blocks == next_block && | |
1011 next_block + next_blocks == _heaplimit && | |
1012 blocks + next_blocks >= lesscore_threshold))) | |
1013 ) | |
1014 { | |
1015 malloc_info *newinfo; | |
1016 __malloc_size_t oldlimit = _heaplimit; | |
1017 | |
1018 /* Free the old info table, clearing _heaplimit to avoid | |
1019 recursion into this code. We don't want to return the | |
1020 table's blocks to the system before we have copied them to | |
1021 the new location. */ | |
1022 _heaplimit = 0; | |
1023 _free_internal (_heapinfo); | |
1024 _heaplimit = oldlimit; | |
1025 | |
1026 /* Tell malloc to search from the beginning of the heap for | |
1027 free blocks, so it doesn't reuse the ones just freed. */ | |
1028 _heapindex = 0; | |
1029 | |
1030 /* Allocate new space for the info table and move its data. */ | |
1031 newinfo = (malloc_info *) _malloc_internal (info_blocks | |
1032 * BLOCKSIZE); | |
1033 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE); | |
1034 _heapinfo = newinfo; | |
1035 | |
1036 /* We should now have coalesced the free block with the | |
1037 blocks freed from the old info table. Examine the entire | |
1038 trailing free block to decide below whether to return some | |
1039 to the system. */ | |
1040 block = _heapinfo[0].free.prev; | |
1041 blocks = _heapinfo[block].free.size; | |
1042 } | |
1043 | |
1044 /* Now see if we can return stuff to the system. */ | |
1045 if (block + blocks == _heaplimit && blocks >= lesscore_threshold) | |
1046 { | |
1047 register __malloc_size_t bytes = blocks * BLOCKSIZE; | |
1048 _heaplimit -= blocks; | |
1049 (*__morecore) (-bytes); | |
1050 _heapinfo[_heapinfo[block].free.prev].free.next | |
1051 = _heapinfo[block].free.next; | |
1052 _heapinfo[_heapinfo[block].free.next].free.prev | |
1053 = _heapinfo[block].free.prev; | |
1054 block = _heapinfo[block].free.prev; | |
1055 --_chunks_free; | |
1056 _bytes_free -= bytes; | |
1057 } | |
1058 } | |
1059 | |
1060 /* Set the next search to begin at this block. */ | |
1061 _heapindex = block; | |
1062 break; | |
1063 | |
1064 default: | |
1065 /* Do some of the statistics. */ | |
1066 --_chunks_used; | |
1067 _bytes_used -= 1 << type; | |
1068 ++_chunks_free; | |
1069 _bytes_free += 1 << type; | |
1070 | |
1071 /* Get the address of the first free fragment in this block. */ | |
1072 prev = (struct list *) ((char *) ADDRESS (block) + | |
1073 (_heapinfo[block].busy.info.frag.first << type)); | |
1074 | |
1075 if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1) | |
1076 { | |
1077 /* If all fragments of this block are free, remove them | |
1078 from the fragment list and free the whole block. */ | |
1079 next = prev; | |
1080 for (i = 1; i < (__malloc_size_t) (BLOCKSIZE >> type); ++i) | |
1081 next = next->next; | |
1082 prev->prev->next = next; | |
1083 if (next != NULL) | |
1084 next->prev = prev->prev; | |
1085 _heapinfo[block].busy.type = 0; | |
1086 _heapinfo[block].busy.info.size = 1; | |
1087 | |
1088 /* Keep the statistics accurate. */ | |
1089 ++_chunks_used; | |
1090 _bytes_used += BLOCKSIZE; | |
1091 _chunks_free -= BLOCKSIZE >> type; | |
1092 _bytes_free -= BLOCKSIZE; | |
1093 | |
1094 free (ADDRESS (block)); | |
1095 } | |
1096 else if (_heapinfo[block].busy.info.frag.nfree != 0) | |
1097 { | |
1098 /* If some fragments of this block are free, link this | |
1099 fragment into the fragment list after the first free | |
1100 fragment of this block. */ | |
1101 next = (struct list *) ptr; | |
1102 next->next = prev->next; | |
1103 next->prev = prev; | |
1104 prev->next = next; | |
1105 if (next->next != NULL) | |
1106 next->next->prev = next; | |
1107 ++_heapinfo[block].busy.info.frag.nfree; | |
1108 } | |
1109 else | |
1110 { | |
1111 /* No fragments of this block are free, so link this | |
1112 fragment into the fragment list and announce that | |
1113 it is the first free fragment of this block. */ | |
1114 prev = (struct list *) ptr; | |
1115 _heapinfo[block].busy.info.frag.nfree = 1; | |
1116 _heapinfo[block].busy.info.frag.first = (unsigned long int) | |
1117 ((unsigned long int) ((char *) ptr - (char *) NULL) | |
1118 % BLOCKSIZE >> type); | |
1119 prev->next = _fraghead[type].next; | |
1120 prev->prev = &_fraghead[type]; | |
1121 prev->prev->next = prev; | |
1122 if (prev->next != NULL) | |
1123 prev->next->prev = prev; | |
1124 } | |
1125 break; | |
1126 } | |
1127 } | |
1128 | |
1129 /* Return memory to the heap. */ | |
26526 |
1130 |
26526 |
1131 FREE_RETURN_TYPE |
17130 | 1132 free (ptr) |
1133 __ptr_t ptr; | |
1134 { | |
1135 if (__free_hook != NULL) | |
1136 (*__free_hook) (ptr); | |
1137 else | |
1138 _free_internal (ptr); | |
1139 } | |
1140 | |
1141 /* Define the `cfree' alias for `free'. */ | |
1142 #ifdef weak_alias | |
1143 weak_alias (free, cfree) | |
1144 #else | |
1145 void | |
1146 cfree (ptr) | |
1147 __ptr_t ptr; | |
1148 { | |
1149 free (ptr); | |
1150 } | |
1151 #endif | |
1152 /* Change the size of a block allocated by `malloc'. | |
1153 Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc. | |
1154 Written May 1989 by Mike Haertel. | |
1155 | |
1156 This library is free software; you can redistribute it and/or | |
1157 modify it under the terms of the GNU Library General Public License as | |
1158 published by the Free Software Foundation; either version 2 of the | |
1159 License, or (at your option) any later version. | |
1160 | |
1161 This library is distributed in the hope that it will be useful, | |
1162 but WITHOUT ANY WARRANTY; without even the implied warranty of | |
1163 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
1164 Library General Public License for more details. | |
1165 | |
1166 You should have received a copy of the GNU Library General Public | |
1167 License along with this library; see the file COPYING.LIB. If | |
1168 not, write to the Free Software Foundation, Inc., 675 Mass Ave, | |
1169 Cambridge, MA 02139, USA. | |
1170 | |
1171 The author may be reached (Email) at the address mike@ai.mit.edu, | |
1172 or (US mail) as Mike Haertel c/o Free Software Foundation. */ | |
1173 | |
1174 #ifndef _MALLOC_INTERNAL | |
1175 #define _MALLOC_INTERNAL | |
1176 #include <malloc.h> | |
1177 #endif | |
1178 | |
1179 | |
1180 | |
1181 /* Cope with systems lacking `memmove'. */ | |
1182 #if (defined (MEMMOVE_MISSING) || \ | |
1183 !defined(_LIBC) && !defined(STDC_HEADERS) && !defined(USG)) | |
1184 | |
1185 #ifdef emacs | |
1186 #undef __malloc_safe_bcopy | |
1187 #define __malloc_safe_bcopy safe_bcopy | |
1188 #else | |
1189 | |
1190 /* Snarfed directly from Emacs src/dispnew.c: | |
1191 XXX Should use system bcopy if it handles overlap. */ | |
1192 | |
1193 /* Like bcopy except never gets confused by overlap. */ | |
1194 | |
1195 void | |
1196 __malloc_safe_bcopy (afrom, ato, size) | |
1197 __ptr_t afrom; | |
1198 __ptr_t ato; | |
1199 __malloc_size_t size; | |
1200 { | |
1201 char *from = afrom, *to = ato; | |
1202 | |
1203 if (size <= 0 || from == to) | |
1204 return; | |
1205 | |
1206 /* If the source and destination don't overlap, then bcopy can | |
1207 handle it. If they do overlap, but the destination is lower in | |
1208 memory than the source, we'll assume bcopy can handle that. */ | |
1209 if (to < from || from + size <= to) | |
1210 bcopy (from, to, size); | |
1211 | |
1212 /* Otherwise, we'll copy from the end. */ | |
1213 else | |
1214 { | |
1215 register char *endf = from + size; | |
1216 register char *endt = to + size; | |
1217 | |
1218 /* If TO - FROM is large, then we should break the copy into | |
1219 nonoverlapping chunks of TO - FROM bytes each. However, if | |
1220 TO - FROM is small, then the bcopy function call overhead | |
1221 makes this not worth it. The crossover point could be about | |
1222 anywhere. Since I don't think the obvious copy loop is too | |
1223 bad, I'm trying to err in its favor. */ | |
1224 if (to - from < 64) | |
1225 { | |
1226 do | |
1227 *--endt = *--endf; | |
1228 while (endf != from); | |
1229 } | |
1230 else | |
1231 { | |
1232 for (;;) | |
1233 { | |
1234 endt -= (to - from); | |
1235 endf -= (to - from); | |
1236 | |
1237 if (endt < to) | |
1238 break; | |
1239 | |
1240 bcopy (endf, endt, to - from); | |
1241 } | |
1242 | |
1243 /* If SIZE wasn't a multiple of TO - FROM, there will be a | |
1244 little left over. The amount left over is | |
1245 (endt + (to - from)) - to, which is endt - from. */ | |
1246 bcopy (from, to, endt - from); | |
1247 } | |
1248 } | |
1249 } | |
1250 #endif /* emacs */ | |
1251 | |
1252 #ifndef memmove | |
18667 |
1253 extern void __malloc_safe_bcopy PP ((__ptr_t, __ptr_t, __malloc_size_t)); |
17130 | 1254 #define memmove(to, from, size) __malloc_safe_bcopy ((from), (to), (size)) |
1255 #endif | |
1256 | |
1257 #endif | |
1258 | |
1259 | |
1260 #define min(A, B) ((A) < (B) ? (A) : (B)) | |
1261 | |
1262 /* Debugging hook for realloc. */ | |
18667 |
__ptr_t (*__realloc_hook) PP ((__ptr_t __ptr, __malloc_size_t __size));

/* Resize the given region to the new size, returning a pointer
   to the (possibly moved) region.  This is optimized for speed;
   some benchmarks seem to indicate that greater compactness is
   achieved by unconditionally allocating and copying to a
   new region.  This module has incestuous knowledge of the
   internals of both free and malloc. */
__ptr_t
_realloc_internal (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  __ptr_t result;
  int type;
  __malloc_size_t block, blocks, oldlimit;

  if (size == 0)
    {
      _free_internal (ptr);
      return _malloc_internal (0);
    }
  else if (ptr == NULL)
    return _malloc_internal (size);

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Maybe reallocate a large block to a small fragment.  */
      if (size <= BLOCKSIZE / 2)
        {
          result = _malloc_internal (size);
          if (result != NULL)
            {
              memcpy (result, ptr, size);
              _free_internal (ptr);
              return result;
            }
        }

      /* The new size is a large allocation as well;
         see if we can hold it in place.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
        {
          /* The new size is smaller; return
             excess memory to the free list. */
          _heapinfo[block + blocks].busy.type = 0;
          _heapinfo[block + blocks].busy.info.size
            = _heapinfo[block].busy.info.size - blocks;
          _heapinfo[block].busy.info.size = blocks;
          /* We have just created a new chunk by splitting a chunk in two.
             Now we will free this chunk; increment the statistics counter
             so it doesn't become wrong when _free_internal decrements it.  */
          ++_chunks_used;
          _free_internal (ADDRESS (block + blocks));
          result = ptr;
        }
      else if (blocks == _heapinfo[block].busy.info.size)
        /* No size change necessary.  */
        result = ptr;
      else
        {
          /* Won't fit, so allocate a new region that will.
             Free the old region first in case there is sufficient
             adjacent free space to grow without moving.  */
          blocks = _heapinfo[block].busy.info.size;
          /* Prevent free from actually returning memory to the system.  */
          oldlimit = _heaplimit;
          _heaplimit = 0;
          _free_internal (ptr);
          result = _malloc_internal (size);
          if (_heaplimit == 0)
            _heaplimit = oldlimit;
          if (result == NULL)
            {
              /* Now we're really in trouble.  We have to unfree
                 the thing we just freed.  Unfortunately it might
                 have been coalesced with its neighbors.  */
              if (_heapindex == block)
                (void) _malloc_internal (blocks * BLOCKSIZE);
              else
                {
                  __ptr_t previous
                    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
                  (void) _malloc_internal (blocks * BLOCKSIZE);
                  _free_internal (previous);
                }
              return NULL;
            }
          if (ptr != result)
            memmove (result, ptr, blocks * BLOCKSIZE);
        }
      break;

    default:
      /* Old size is a fragment; type is logarithm
         to base two of the fragment size.  */
      if (size > (__malloc_size_t) (1 << (type - 1)) &&
          size <= (__malloc_size_t) (1 << type))
        /* The new size is the same kind of fragment.  */
        result = ptr;
      else
        {
          /* The new size is different; allocate a new space,
             and copy the lesser of the new size and the old. */
          result = _malloc_internal (size);
          if (result == NULL)
            return NULL;
          memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
          _free_internal (ptr);
        }
      break;
    }

  return result;
}
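
/* Editorial sketch, not part of the original source: a disabled
   (#if 0) fragment illustrating the two multi-block paths above.
   Shrinking a large region splits its run of blocks in place and keeps
   the same pointer; growing it may move the data, so callers must use
   the value returned.  The function name below is hypothetical.  */
#if 0
static void
_realloc_internal_example (void)
{
  __ptr_t p, q;

  /* A region spanning 8 blocks.  */
  p = _malloc_internal (8 * BLOCKSIZE);

  /* Shrinking to 3 blocks takes the `blocks < ...info.size' branch:
     blocks 3..7 are split off and freed, and P itself is returned.  */
  q = _realloc_internal (p, 3 * BLOCKSIZE);   /* q == p */

  /* Growing beyond the current run takes the final branch: the region
     is freed, reallocated (possibly elsewhere) and copied with
     memmove, so the result may differ from the old pointer.  */
  q = _realloc_internal (q, 16 * BLOCKSIZE);
  _free_internal (q);
}
#endif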

__ptr_t
realloc (ptr, size)
     __ptr_t ptr;
     __malloc_size_t size;
{
  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
    (ptr, size);
}
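
/* Editorial sketch, not part of the original source: the realloc
   entry point above dispatches through __realloc_hook when it is set,
   which lets a program interpose on reallocation, e.g. for tracing.
   The hook function and installer below are hypothetical, and the
   fragment is disabled (#if 0) so it does not affect the build.  */
#if 0
#include <stdio.h>

static __ptr_t
tracing_realloc_hook (__ptr_t ptr, __malloc_size_t size)
{
  __ptr_t result = _realloc_internal (ptr, size);
  fprintf (stderr, "realloc (%p, %lu) -> %p\n",
           ptr, (unsigned long) size, result);
  return result;
}

static void
install_tracing_realloc_hook (void)
{
  __realloc_hook = tracing_realloc_hook;
}
#endif
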
/* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

The author may be reached (Email) at the address mike@ai.mit.edu,
or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

/* Allocate an array of NMEMB elements each SIZE bytes long.
   The entire array is initialized to zeros.  */
__ptr_t
calloc (nmemb, size)
     register __malloc_size_t nmemb;
     register __malloc_size_t size;
{
  register __ptr_t result = malloc (nmemb * size);

  if (result != NULL)
    (void) memset (result, 0, nmemb * size);

  return result;
}
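
/* Editorial note, not part of the original source: calloc above
   computes NMEMB * SIZE without an overflow check, so a huge pair of
   arguments can wrap around and silently yield a smaller allocation
   than requested.  The hypothetical caller-side guard below, disabled
   with #if 0, shows one way to reject such requests.  */
#if 0
static __ptr_t
checked_calloc (__malloc_size_t nmemb, __malloc_size_t size)
{
  /* Refuse the request if the product would overflow __malloc_size_t.  */
  if (size != 0 && nmemb > (__malloc_size_t) -1 / size)
    return NULL;
  return calloc (nmemb, size);
}
#endif
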
/* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of the GNU C Library.

The GNU C Library is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with the GNU C Library; see the file COPYING.  If not, write to
the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#ifndef __GNU_LIBRARY__
#define __sbrk sbrk
#endif

#ifdef __GNU_LIBRARY__
/* It is best not to declare this and cast its result on foreign operating
   systems with potentially hostile include files.  */

#include <stddef.h>
extern __ptr_t __sbrk PP ((ptrdiff_t increment));
#endif

#ifndef NULL
#define NULL 0
#endif

/* Allocate INCREMENT more bytes of data space,
   and return the start of data space, or NULL on errors.
   If INCREMENT is negative, shrink data space.  */
__ptr_t
__default_morecore (increment)
     __malloc_ptrdiff_t increment;
{
  __ptr_t result = (__ptr_t) __sbrk (increment);
  if (result == (__ptr_t) -1)
    return NULL;
  return result;
}
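
/* Editorial sketch, not part of the original source: the allocator
   obtains core through the __morecore hook declared earlier in this
   file, for which __default_morecore above is the stock
   implementation, so a program can substitute its own source of
   memory.  The arena routine and installer below are hypothetical and
   the fragment is disabled (#if 0).  */
#if 0
extern __ptr_t my_arena_grow (__malloc_ptrdiff_t increment);

static __ptr_t
my_morecore (__malloc_ptrdiff_t increment)
{
  /* Hand out INCREMENT more bytes from a private arena instead of
     calling sbrk; return NULL on failure, like __default_morecore.  */
  return my_arena_grow (increment);
}

static void
use_private_arena (void)
{
  /* Must be done before the first allocation.  */
  __morecore = my_morecore;
}
#endif
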
/* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.  */

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

#if __DJGPP__ - 0 == 1

/* There is some problem with memalign in DJGPP v1 and we are supposed
   to omit it.  No one told me why, they just told me to do it.  */

#else

__ptr_t (*__memalign_hook) PP ((size_t __size, size_t __alignment));

__ptr_t
memalign (alignment, size)
     __malloc_size_t alignment;
     __malloc_size_t size;
{
  __ptr_t result;
  unsigned long int adj, lastadj;

  if (__memalign_hook)
    return (*__memalign_hook) (alignment, size);

  /* Allocate a block with enough extra space to pad the block with up to
     (ALIGNMENT - 1) bytes if necessary.  */
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* Figure out how much we will need to pad this particular block
     to achieve the required alignment.  */
  adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;

  do
    {
      /* Reallocate the block with only as much excess as it needs.  */
      free (result);
      result = malloc (adj + size);
      if (result == NULL)	/* Impossible unless interrupted.  */
        return NULL;

      lastadj = adj;
      adj = (unsigned long int) ((char *) result - (char *) NULL) % alignment;
      /* It's conceivable we might have been so unlucky as to get a
         different block with weaker alignment.  If so, this block is too
         short to contain SIZE after alignment correction.  So we must
         try again and get another block, slightly larger.  */
    } while (adj > lastadj);

  if (adj != 0)
    {
      /* Record this block in the list of aligned blocks, so that `free'
         can identify the pointer it is passed, which will be in the middle
         of an allocated block.  */

      struct alignlist *l;
      for (l = _aligned_blocks; l != NULL; l = l->next)
        if (l->aligned == NULL)
          /* This slot is free.  Use it.  */
          break;
      if (l == NULL)
        {
          l = (struct alignlist *) malloc (sizeof (struct alignlist));
          if (l == NULL)
            {
              free (result);
              return NULL;
            }
          l->next = _aligned_blocks;
          _aligned_blocks = l;
        }
      l->exact = result;
      result = l->aligned = (char *) result + alignment - adj;
    }

  return result;
}
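
/* Editorial sketch, not part of the original source: the alignment
   arithmetic above in concrete terms.  Suppose ALIGNMENT is 64 and
   malloc happens to return addresses whose remainder modulo 64 is 40;
   then ADJ is 40, the block is reallocated with 40 extra bytes, and
   the returned pointer is the block start advanced by 64 - 40 = 24
   bytes, i.e. the next 64-byte boundary.  The alignlist bookkeeping
   lets free recognize that interior pointer later.  The function name
   below is hypothetical and the fragment is disabled (#if 0).  */
#if 0
static void
memalign_example (void)
{
  /* 1000 bytes starting on a 64-byte boundary.  */
  __ptr_t p = memalign (64, 1000);
  if (p != NULL)
    {
      /* ((unsigned long) (char *) p) % 64 == 0 here, and P may point
         into the middle of the underlying block, yet it can be passed
         straight to free thanks to _aligned_blocks.  */
      free (p);
    }
}
#endif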

#endif /* Not DJGPP v1 */
/* Allocate memory on a page boundary.
   Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Library General Public License for more details.

You should have received a copy of the GNU Library General Public
License along with this library; see the file COPYING.LIB.  If
not, write to the Free Software Foundation, Inc., 675 Mass Ave,
Cambridge, MA 02139, USA.

The author may be reached (Email) at the address mike@ai.mit.edu,
or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#if defined (_MALLOC_INTERNAL) && defined (GMALLOC_INHIBIT_VALLOC)

/* Emacs defines GMALLOC_INHIBIT_VALLOC to avoid this definition
   on MSDOS, where it conflicts with a system header file.  */

#define ELIDE_VALLOC

#endif

#ifndef ELIDE_VALLOC

#if defined (__GNU_LIBRARY__) || defined (_LIBC)
#include <stddef.h>
#include <sys/cdefs.h>
#if defined (__GLIBC__) && __GLIBC__ >= 2
/* __getpagesize is already declared in <unistd.h> with return type int */
#else
extern size_t __getpagesize PP ((void));
#endif
#else
#include "getpagesize.h"
#define __getpagesize() getpagesize()
#endif

#ifndef _MALLOC_INTERNAL
#define _MALLOC_INTERNAL
#include <malloc.h>
#endif

static __malloc_size_t pagesize;

__ptr_t
valloc (size)
     __malloc_size_t size;
{
  if (pagesize == 0)
    pagesize = __getpagesize ();

  return memalign (pagesize, size);
}
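
/* Editorial sketch, not part of the original source: valloc is simply
   memalign with the system page size, which is the granularity that
   page-oriented interfaces such as mprotect expect.  The function
   below is hypothetical and the fragment is disabled (#if 0).  */
#if 0
static void
valloc_example (void)
{
  /* One page-aligned page of memory.  */
  __ptr_t page = valloc (__getpagesize ());
  if (page != NULL)
    free (page);
}
#endif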

#endif /* Not ELIDE_VALLOC.  */