comparison src/ralloc.c @ 9459:a1569f00a6a6

Install Hiroshi Nakano's rewrite to allow multiple heaps, for implementations where the C library makes calls to sbrk directly.
author Karl Heuer <kwzh@gnu.org>
date Wed, 12 Oct 1994 00:48:03 +0000
parents b628561b185b
children 134f7085c56b
9458:a6d5f1c10986 9459:a1569f00a6a6
94 static POINTER virtual_break_value; 94 static POINTER virtual_break_value;
95 95
96 /* The break value, viewed by the relocatable blocs. */ 96 /* The break value, viewed by the relocatable blocs. */
97 static POINTER break_value; 97 static POINTER break_value;
98 98
99 /* The REAL (i.e., page aligned) break value of the process. */
100 static POINTER page_break_value;
101
102 /* This is the size of a page. We round memory requests to this boundary. */ 99 /* This is the size of a page. We round memory requests to this boundary. */
103 static int page_size; 100 static int page_size;
104 101
105 /* Whenever we get memory from the system, get this many extra bytes. This 102 /* Whenever we get memory from the system, get this many extra bytes. This
106 must be a multiple of page_size. */ 103 must be a multiple of page_size. */
111 #define PAGE (getpagesize ()) 108 #define PAGE (getpagesize ())
112 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0) 109 #define ALIGNED(addr) (((unsigned long int) (addr) & (page_size - 1)) == 0)
113 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \ 110 #define ROUNDUP(size) (((unsigned long int) (size) + page_size - 1) \
114 & ~(page_size - 1)) 111 & ~(page_size - 1))
115 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1))) 112 #define ROUND_TO_PAGE(addr) (addr & (~(page_size - 1)))
113
114 #define MEM_ALIGN sizeof(double)
115 #define MEM_ROUNDUP(addr) (((unsigned long int)(addr) + MEM_ALIGN - 1) \
116 & ~(MEM_ALIGN - 1))
116 117
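As a quick illustration of the two rounding conventions above, here is a minimal standalone sketch (not part of the patch), assuming a 4096-byte page and an 8-byte double:

#include <stdio.h>

#define MEM_ALIGN sizeof (double)                       /* typically 8 */
#define MEM_ROUNDUP(n) (((unsigned long) (n) + MEM_ALIGN - 1) & ~(MEM_ALIGN - 1))

int
main (void)
{
  unsigned long page_size = 4096;                       /* assumed value */
  unsigned long roundup = (5000 + page_size - 1) & ~(page_size - 1);

  /* ROUNDUP snaps a size or address up to a page boundary;
     MEM_ROUNDUP only up to the alignment of a double.  */
  printf ("ROUNDUP (5000)   = %lu\n", roundup);                          /* 8192 */
  printf ("MEM_ROUNDUP (13) = %lu\n", (unsigned long) MEM_ROUNDUP (13)); /* 16 */
  printf ("MEM_ROUNDUP (16) = %lu\n", (unsigned long) MEM_ROUNDUP (16)); /* 16 */
  return 0;
}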
117 /* Functions to get and return memory from the system. */ 118 /* Data structures of heaps and blocs */
118 119 typedef struct heap
119 /* Obtain SIZE bytes of space. If enough space is not presently available 120 {
120 in our process reserve, (i.e., (page_break_value - break_value)), 121 struct heap *next;
121 this means getting more page-aligned space from the system. 122 struct heap *prev;
122 123 POINTER start;
123 Return non-zero if all went well, or zero if we couldn't allocate 124 POINTER end;
124 the memory. */ 125 POINTER bloc_start; /* start of relocatable blocs */
125 static int 126 } *heap_ptr;
126 obtain (size) 127
127 SIZE size; 128 #define NIL_HEAP ((heap_ptr) 0)
128 { 129 #define HEAP_PTR_SIZE (sizeof (struct heap))
129 SIZE already_available = page_break_value - break_value; 130
130 131 /* Head and tail of the list of heaps. */
131 if (already_available < size) 132 static heap_ptr first_heap, last_heap;
132 {
133 SIZE get = ROUNDUP (size - already_available);
134 /* Get some extra, so we can come here less often. */
135 get += extra_bytes;
136
137 if ((*real_morecore) (get) == 0)
138 return 0;
139
140 page_break_value += get;
141 }
142
143 break_value += size;
144
145 return 1;
146 }
147
148 /* Obtain SIZE bytes of space and return a pointer to the new area.
149 If we could not allocate the space, return zero. */
150
151 static POINTER
152 get_more_space (size)
153 SIZE size;
154 {
155 POINTER ptr = break_value;
156 if (obtain (size))
157 return ptr;
158 else
159 return 0;
160 }
161
162 /* Note that SIZE bytes of space have been relinquished by the process.
163 If SIZE is more than a page, return the space to the system. */
164
165 static void
166 relinquish (size)
167 SIZE size;
168 {
169 POINTER new_page_break;
170 int excess;
171
172 break_value -= size;
173 new_page_break = (POINTER) ROUNDUP (break_value);
174 excess = (char *) page_break_value - (char *) new_page_break;
175
176 if (excess > extra_bytes * 2)
177 {
178 /* Keep extra_bytes worth of empty space.
179 And don't free anything unless we can free at least extra_bytes. */
180 if ((*real_morecore) (extra_bytes - excess) == 0)
181 abort ();
182
183 page_break_value += extra_bytes - excess;
184 }
185
186 /* Zero the space from the end of the "official" break to the actual
187 break, so that bugs show up faster. */
188 bzero (break_value, ((char *) page_break_value - (char *) break_value));
189 }
190
191 /* The meat - allocating, freeing, and relocating blocs. */
192 133
193 /* These structures are allocated in the malloc arena. 134 /* These structures are allocated in the malloc arena.
194 The linked list is kept in order of increasing '.data' members. 135 The linked list is kept in order of increasing '.data' members.
195 The data blocks abut each other; if b->next is non-nil, then 136 The data blocks abut each other; if b->next is non-nil, then
196 b->data + b->size == b->next->data. */ 137 b->data + b->size == b->next->data. */
199 struct bp *next; 140 struct bp *next;
200 struct bp *prev; 141 struct bp *prev;
201 POINTER *variable; 142 POINTER *variable;
202 POINTER data; 143 POINTER data;
203 SIZE size; 144 SIZE size;
145 POINTER new_data; /* temporarily used for relocation */
204 } *bloc_ptr; 146 } *bloc_ptr;
205 147
206 #define NIL_BLOC ((bloc_ptr) 0) 148 #define NIL_BLOC ((bloc_ptr) 0)
207 #define BLOC_PTR_SIZE (sizeof (struct bp)) 149 #define BLOC_PTR_SIZE (sizeof (struct bp))
208 150
209 /* Head and tail of the list of relocatable blocs. */ 151 /* Head and tail of the list of relocatable blocs. */
210 static bloc_ptr first_bloc, last_bloc; 152 static bloc_ptr first_bloc, last_bloc;
153
154
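The ordering/abutment invariant described in the comment above can be stated as a tiny check; a sketch (not part of the patch) over a simplified bloc list. Note that once multiple heaps exist, abutment may legitimately be broken at a heap boundary, which is exactly what the r_alloc_check function added at the end of this change verifies.

#include <stddef.h>

struct demo_bloc { struct demo_bloc *next; char *data; size_t size; };

/* Return 1 if every bloc begins exactly where its predecessor ends. */
static int
blocs_abut (struct demo_bloc *b)
{
  for (; b && b->next; b = b->next)
    if (b->data + b->size != b->next->data)
      return 0;         /* a gap: with multiple heaps, only permitted at
                           the bloc_start of the next heap */
  return 1;
}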
155 /* Functions to get and return memory from the system. */
156
157 /* Obtain SIZE bytes of space starting at ADDRESS in a heap.
158 If enough space is not presently available in our reserve, this means
159 getting more page-aligned space from the system. If the returned space
160 is not contiguous to the last heap, allocate a new heap, and append it
161 to the heap list.
162
163 Return the address of the space if all went well, or zero if we couldn't
164 allocate the memory. */
165 static POINTER
166 obtain (address, size)
167 POINTER address;
168 SIZE size;
169 {
170 heap_ptr heap;
171 SIZE already_available;
172
173 for (heap = last_heap; heap; heap = heap->prev)
174 {
175 if (heap->start <= address && address <= heap->end)
176 break;
177 }
178
179 if (! heap)
180 abort();
181
182 while (heap && address + size > heap->end)
183 {
184 heap = heap->next;
185 if (heap == NIL_HEAP)
186 break;
187 address = heap->bloc_start;
188 }
189
190 if (heap == NIL_HEAP)
191 {
192 POINTER new = (*real_morecore)(0);
193 SIZE get;
194
195 already_available = (char *)last_heap->end - (char *)address;
196
197 if (new != last_heap->end)
198 {
199 /* Someone else called sbrk(). */
200 heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP(new);
201 POINTER bloc_start = (POINTER) MEM_ROUNDUP((POINTER)(new_heap + 1));
202
203 if ((*real_morecore) (bloc_start - new) != new)
204 return 0;
205
206 new_heap->start = new;
207 new_heap->end = bloc_start;
208 new_heap->bloc_start = bloc_start;
209 new_heap->next = NIL_HEAP;
210 new_heap->prev = last_heap;
211 last_heap->next = new_heap;
212 last_heap = new_heap;
213
214 address = bloc_start;
215 already_available = 0;
216 }
217
218 /* Get some extra, so we can come here less often. */
219 get = size + extra_bytes - already_available;
220 get = (char *) ROUNDUP((char *)last_heap->end + get)
221 - (char *) last_heap->end;
222
223 if ((*real_morecore) (get) != last_heap->end)
224 return 0;
225
226 last_heap->end += get;
227 }
228
229 return address;
230 }
231
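A condensed sketch (hypothetical names, not patch code) of the search loop obtain() runs before falling back to real_morecore: starting in the heap that contains ADDRESS, it advances through later heaps, restarting the candidate address at each heap's bloc_start (the area past that heap's header), until one heap can hold SIZE bytes.

#include <stddef.h>

struct demo_heap { struct demo_heap *next; char *start, *end, *bloc_start; };

static char *
find_room (struct demo_heap *h, char *address, size_t size)
{
  while (h && address + size > h->end)
    {
      h = h->next;
      if (h == NULL)
        return NULL;            /* nothing fits; obtain() would now grow
                                   the last heap, or start a new one, via
                                   real_morecore */
      address = h->bloc_start;  /* skip over the next heap's header */
    }
  return address;
}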
232 /* If the last heap has excess space, return it to the system. */
233 static void
234 relinquish ()
235 {
236 register heap_ptr h;
237 int excess = 0;
238
239 for (h = last_heap; h && break_value < h->end; h = h->prev)
240 {
241 excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
242 ? h->bloc_start : break_value);
243 }
244
245 if (excess > extra_bytes * 2 && (*real_morecore) (0) == last_heap->end)
246 {
247 /* Keep extra_bytes worth of empty space.
248 And don't free anything unless we can free at least extra_bytes. */
249 excess -= extra_bytes;
250
251 if ((char *)last_heap->end - (char *)last_heap->bloc_start <= excess)
252 {
253 /* Return the last heap with its header to the system */
254 excess = (char *)last_heap->end - (char *)last_heap->start;
255 last_heap = last_heap->prev;
256 last_heap->next = NIL_HEAP;
257 }
258 else
259 {
260 excess = (char *) last_heap->end
261 - (char *) ROUNDUP((char *)last_heap->end - excess);
262 last_heap->end -= excess;
263 }
264
265 if ((*real_morecore) (- excess) == 0)
266 abort ();
267 }
268 }
269
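The shrink policy in relinquish() amounts to a simple hysteresis rule; a sketch with illustrative numbers (extra_bytes is ROUNDUP (50000), i.e. 53248 bytes on 4 KB pages):

/* Only give memory back when the unused tail is worth a system call,
   and always keep extra_bytes of slack for the next allocation. */
static long
bytes_to_release (long excess, long extra_bytes)
{
  if (excess <= 2 * extra_bytes)
    return 0;                          /* below the threshold: keep it all */
  return excess - extra_bytes;         /* e.g. 204800 -> 151552 released,
                                          53248 kept as headroom */
}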
270 /* The meat - allocating, freeing, and relocating blocs. */
211 271
212 /* Find the bloc referenced by the address in PTR. Returns a pointer 272 /* Find the bloc referenced by the address in PTR. Returns a pointer
213 to that block. */ 273 to that block. */
214 274
215 static bloc_ptr 275 static bloc_ptr
238 SIZE size; 298 SIZE size;
239 { 299 {
240 register bloc_ptr new_bloc; 300 register bloc_ptr new_bloc;
241 301
242 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE)) 302 if (! (new_bloc = (bloc_ptr) malloc (BLOC_PTR_SIZE))
243 || ! (new_bloc->data = get_more_space (size))) 303 || ! (new_bloc->data = obtain (break_value, size)))
244 { 304 {
245 if (new_bloc) 305 if (new_bloc)
246 free (new_bloc); 306 free (new_bloc);
247 307
248 return 0; 308 return 0;
249 } 309 }
310
311 break_value = new_bloc->data + size;
250 312
251 new_bloc->size = size; 313 new_bloc->size = size;
252 new_bloc->next = NIL_BLOC; 314 new_bloc->next = NIL_BLOC;
253 new_bloc->variable = (POINTER *) NIL; 315 new_bloc->variable = (POINTER *) NIL;
316 new_bloc->new_data = 0;
254 317
255 if (first_bloc) 318 if (first_bloc)
256 { 319 {
257 new_bloc->prev = last_bloc; 320 new_bloc->prev = last_bloc;
258 last_bloc->next = new_bloc; 321 last_bloc->next = new_bloc;
265 } 328 }
266 329
267 return new_bloc; 330 return new_bloc;
268 } 331 }
269 332
270 /* Relocate all blocs from BLOC on upward in the list to the zone 333 /* Calculate new locations of blocs in the list beginning with BLOC,
271 indicated by ADDRESS. Direction of relocation is determined by 334 whose space starts at ADDRESS in HEAP. If enough space is
272 the position of ADDRESS relative to BLOC->data. 335 not presently available in our reserve, obtain() is called for
273 336 more space.
274 If BLOC is NIL_BLOC, nothing is done. 337
275 338 Do not touch the contents of blocs or break_value. */
276 Note that ordering of blocs is not affected by this function. */ 339
277 340 static int
278 static void 341 relocate_blocs (bloc, heap, address)
279 relocate_some_blocs (bloc, address) 342 bloc_ptr bloc;
280 bloc_ptr bloc; 343 heap_ptr heap;
281 POINTER address; 344 POINTER address;
282 { 345 {
283 if (bloc != NIL_BLOC) 346 register bloc_ptr b = bloc;
284 { 347
285 register SIZE offset = address - bloc->data; 348 while (b)
286 register SIZE data_size = 0; 349 {
287 register bloc_ptr b; 350 while (heap && address + b->size > heap->end)
288 351 {
352 heap = heap->next;
353 if (heap == NIL_HEAP)
354 break;
355 address = heap->bloc_start;
356 }
357
358 if (heap == NIL_HEAP)
359 {
360 register bloc_ptr tb = b;
361 register SIZE s = 0;
362
363 while (tb != NIL_BLOC)
364 {
365 s += tb->size;
366 tb = tb->next;
367 }
368
369 if (! (address = obtain(address, s)))
370 return 0;
371
372 heap = last_heap;
373 }
374
375 b->new_data = address;
376 address += b->size;
377 b = b->next;
378 }
379
380 return 1;
381 }
382
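relocate_blocs only plans the move by filling in each bloc's new_data; the commit happens in its callers (resize_bloc below and r_alloc_sbrk), which also update *b->variable and walk the list in whichever direction keeps an overlapping move from clobbering bytes not yet copied: last bloc first when sliding up, first bloc first when sliding down. A sketch of that commit step (hypothetical names; memmove stands in for Emacs's overlap-safe safe_bcopy):

#include <stddef.h>
#include <string.h>

struct demo_bloc
{
  struct demo_bloc *next, *prev;
  char *data, *new_data;
  size_t size;
};

static void
commit_moves (struct demo_bloc *first, struct demo_bloc *last, int moving_up)
{
  struct demo_bloc *b;

  if (moving_up)
    for (b = last; b; b = b->prev)     /* highest addresses first */
      {
        memmove (b->new_data, b->data, b->size);
        b->data = b->new_data;
      }
  else
    for (b = first; b; b = b->next)    /* lowest addresses first */
      {
        memmove (b->new_data, b->data, b->size);
        b->data = b->new_data;
      }
}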
383 /* Resize BLOC to SIZE bytes. */
384 static int
385 resize_bloc (bloc, size)
386 bloc_ptr bloc;
387 SIZE size;
388 {
389 register bloc_ptr b;
390 heap_ptr heap;
391 POINTER address;
392 SIZE old_size;
393
394 if (bloc == NIL_BLOC || size == bloc->size)
395 return 1;
396
397 for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
398 {
399 if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
400 break;
401 }
402
403 if (heap == NIL_HEAP)
404 abort();
405
406 old_size = bloc->size;
407 bloc->size = size;
408
409 /* Note that bloc could be moved into the previous heap. */
410 address = bloc->prev ? bloc->prev->data + bloc->prev->size
411 : first_heap->bloc_start;
412 while (heap)
413 {
414 if (heap->bloc_start <= address && address <= heap->end)
415 break;
416 heap = heap->prev;
417 }
418
419 if (! relocate_blocs (bloc, heap, address))
420 {
421 bloc->size = old_size;
422 return 0;
423 }
424
425 if (size > old_size)
426 {
427 for (b = last_bloc; b != bloc; b = b->prev)
428 {
429 safe_bcopy (b->data, b->new_data, b->size);
430 *b->variable = b->data = b->new_data;
431 }
432 safe_bcopy (bloc->data, bloc->new_data, old_size);
433 bzero (bloc->new_data + old_size, size - old_size);
434 *bloc->variable = bloc->data = bloc->new_data;
435 }
436 else
437 {
289 for (b = bloc; b != NIL_BLOC; b = b->next) 438 for (b = bloc; b != NIL_BLOC; b = b->next)
290 { 439 {
291 data_size += b->size; 440 safe_bcopy (b->data, b->new_data, b->size);
292 b->data += offset; 441 *b->variable = b->data = b->new_data;
293 *b->variable = b->data; 442 }
294 } 443 }
295 444
296 safe_bcopy (address - offset, address, data_size); 445 break_value = last_bloc ? last_bloc->data + last_bloc->size
297 } 446 : first_heap->bloc_start;
298 } 447 return 1;
299 448 }
300 449
301 /* Free BLOC from the chain of blocs, relocating any blocs above it 450 /* Free BLOC from the chain of blocs, relocating any blocs above it
302 and returning BLOC->size bytes to the free area. */ 451 and returning BLOC->size bytes to the free area. */
303 452
304 static void 453 static void
305 free_bloc (bloc) 454 free_bloc (bloc)
306 bloc_ptr bloc; 455 bloc_ptr bloc;
307 { 456 {
457 resize_bloc (bloc, 0);
458
308 if (bloc == first_bloc && bloc == last_bloc) 459 if (bloc == first_bloc && bloc == last_bloc)
309 { 460 {
310 first_bloc = last_bloc = NIL_BLOC; 461 first_bloc = last_bloc = NIL_BLOC;
311 } 462 }
312 else if (bloc == last_bloc) 463 else if (bloc == last_bloc)
323 { 474 {
324 bloc->next->prev = bloc->prev; 475 bloc->next->prev = bloc->prev;
325 bloc->prev->next = bloc->next; 476 bloc->prev->next = bloc->next;
326 } 477 }
327 478
328 relocate_some_blocs (bloc->next, bloc->data); 479 relinquish ();
329 relinquish (bloc->size);
330 free (bloc); 480 free (bloc);
331 } 481 }
332 482
333 /* Interface routines. */ 483 /* Interface routines. */
334 484
348 498
349 POINTER 499 POINTER
350 r_alloc_sbrk (size) 500 r_alloc_sbrk (size)
351 long size; 501 long size;
352 { 502 {
353 /* This is the first address not currently available for the heap. */ 503 register bloc_ptr b;
354 POINTER top; 504 POINTER address;
355 /* Amount of empty space below that. */
356 /* It is not correct to use SIZE here, because that is usually unsigned.
357 ptrdiff_t would be okay, but is not always available.
358 `long' will work in all cases, in practice. */
359 long already_available;
360 POINTER ptr;
361 505
362 if (! use_relocatable_buffers) 506 if (! use_relocatable_buffers)
363 return (*real_morecore) (size); 507 return (*real_morecore) (size);
364 508
365 top = first_bloc ? first_bloc->data : page_break_value; 509 if (size == 0)
366 already_available = (char *) top - (char *) virtual_break_value; 510 return virtual_break_value;
367 511
368 /* Do we not have enough gap already? */ 512 if (size > 0)
369 if (size > 0 && already_available < size) 513 {
370 { 514 /* Allocate a page-aligned space. GNU malloc would reclaim an
371 /* Get what we need, plus some extra so we can come here less often. */ 515 extra space if we passed an unaligned one. But we could
372 SIZE get = size - already_available + extra_bytes; 516 not always find a space which is contiguous to the previous. */
373 517 POINTER new_bloc_start;
374 if (r_alloc_freeze_level > 0 || ! obtain (get)) 518 heap_ptr h = first_heap;
375 return 0; 519 SIZE get = ROUNDUP(size);
376 520
377 if (first_bloc) 521 address = (POINTER) ROUNDUP(virtual_break_value);
378 relocate_some_blocs (first_bloc, first_bloc->data + get); 522
379 523 /* Search the list upward for a heap which is large enough. */
380 /* Zero out the space we just allocated, to help catch bugs 524 while ((char *) h->end < (char *) MEM_ROUNDUP((char *)address + get))
381 quickly. */ 525 {
382 bzero (virtual_break_value, get); 526 h = h->next;
383 } 527 if (h == NIL_HEAP)
384 /* Can we keep extra_bytes of gap while freeing at least extra_bytes? */ 528 break;
385 else if (size < 0 && already_available - size > 2 * extra_bytes 529 address = (POINTER) ROUNDUP(h->start);
386 && r_alloc_freeze_level == 0) 530 }
387 { 531
388 /* Ok, do so. This is how many to free. */ 532 /* If not found, obtain more space. */
389 SIZE give_back = already_available - size - extra_bytes; 533 if (h == NIL_HEAP)
390 534 {
391 if (first_bloc) 535 get += extra_bytes + page_size;
392 relocate_some_blocs (first_bloc, first_bloc->data - give_back); 536
393 relinquish (give_back); 537 if (r_alloc_freeze_level > 0 || ! obtain(address, get))
394 } 538 return 0;
395 539
396 ptr = virtual_break_value; 540 if (first_heap == last_heap)
397 virtual_break_value += size; 541 address = (POINTER) ROUNDUP(virtual_break_value);
398 542 else
399 return ptr; 543 address = (POINTER) ROUNDUP(last_heap->start);
544 h = last_heap;
545 }
546
547 new_bloc_start = (POINTER) MEM_ROUNDUP((char *)address + get);
548
549 if (first_heap->bloc_start < new_bloc_start)
550 {
551 /* Move all blocs upward. */
552 if (r_alloc_freeze_level > 0
553 || ! relocate_blocs (first_bloc, h, new_bloc_start))
554 return 0;
555
556 /* Note that (POINTER)(h+1) <= new_bloc_start since
557 get >= page_size, so the following does not destroy the heap
558 header. */
559 for (b = last_bloc; b != NIL_BLOC; b = b->prev)
560 {
561 safe_bcopy (b->data, b->new_data, b->size);
562 *b->variable = b->data = b->new_data;
563 }
564
565 h->bloc_start = new_bloc_start;
566 }
567
568 if (h != first_heap)
569 {
570 /* Give up managing heaps below the one the new
571 virtual_break_value points to. */
572 first_heap->prev = NIL_HEAP;
573 first_heap->next = h->next;
574 first_heap->start = h->start;
575 first_heap->end = h->end;
576 first_heap->bloc_start = h->bloc_start;
577
578 if (first_heap->next)
579 first_heap->next->prev = first_heap;
580 else
581 last_heap = first_heap;
582 }
583
584 bzero (address, size);
585 }
586 else /* size < 0 */
587 {
588 SIZE excess = (char *)first_heap->bloc_start
589 - ((char *)virtual_break_value + size);
590
591 address = virtual_break_value;
592
593 if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
594 {
595 excess -= extra_bytes;
596 first_heap->bloc_start
597 = (POINTER) MEM_ROUNDUP((char *)first_heap->bloc_start - excess);
598
599 relocate_blocs(first_bloc, first_heap, first_heap->bloc_start);
600
601 for (b = first_bloc; b != NIL_BLOC; b = b->next)
602 {
603 safe_bcopy (b->data, b->new_data, b->size);
604 *b->variable = b->data = b->new_data;
605 }
606 }
607
608 if ((char *)virtual_break_value + size < (char *)first_heap->start)
609 {
610 /* We found additional space below the first heap. */
611 first_heap->start = (POINTER) ((char *)virtual_break_value + size);
612 }
613 }
614
615 virtual_break_value = (POINTER) ((char *)address + size);
616 break_value = last_bloc ? last_bloc->data + last_bloc->size
617 : first_heap->bloc_start;
618 if (size < 0)
619 relinquish();
620
621 return address;
400 } 622 }
401 623
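A worked example of the positive-size path above, with illustrative numbers (page_size = 4096, MEM_ALIGN = 8, virtual_break_value = 0x10000, blocs currently starting below 0x12000):

  size           = 5000
  get            = ROUNDUP (5000)                 = 8192
  address        = ROUNDUP (virtual_break_value)  = 0x10000
  new_bloc_start = MEM_ROUNDUP (address + get)    = 0x12000

So the blocs are replanned (relocate_blocs) to begin at 0x12000 and copied upward starting from the last bloc, the heap's bloc_start is raised, and the zeroed region [0x10000, 0x10000 + 5000) is what the caller, normally GNU malloc going through __morecore, receives.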
402 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to 624 /* Allocate a relocatable bloc of storage of size SIZE. A pointer to
403 the data is returned in *PTR. PTR is thus the address of some variable 625 the data is returned in *PTR. PTR is thus the address of some variable
404 which will use the data area. 626 which will use the data area.
414 register bloc_ptr new_bloc; 636 register bloc_ptr new_bloc;
415 637
416 if (! r_alloc_initialized) 638 if (! r_alloc_initialized)
417 r_alloc_init (); 639 r_alloc_init ();
418 640
419 new_bloc = get_bloc (size); 641 new_bloc = get_bloc (MEM_ROUNDUP(size));
420 if (new_bloc) 642 if (new_bloc)
421 { 643 {
422 new_bloc->variable = ptr; 644 new_bloc->variable = ptr;
423 *ptr = new_bloc->data; 645 *ptr = new_bloc->data;
424 } 646 }
468 690
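A hypothetical usage sketch of this interface (it relies on the declarations in this file and its companion entry points r_re_alloc and r_alloc_free, which are only partly visible in these hunks): the caller passes the address of its own pointer variable, and ralloc rewrites that variable whenever the bloc is relocated.

static POINTER buffer_text;

static void
demo_usage (void)
{
  r_alloc (&buffer_text, 8192);       /* buffer_text now points at 8 KB */
  r_re_alloc (&buffer_text, 16384);   /* grow; buffer_text may move */
  r_alloc_free (&buffer_text);        /* give the bloc back */
}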
469 if (size <= bloc->size) 691 if (size <= bloc->size)
470 /* Wouldn't it be useful to actually resize the bloc here? */ 692 /* Wouldn't it be useful to actually resize the bloc here? */
471 return *ptr; 693 return *ptr;
472 694
473 if (! obtain (size - bloc->size)) 695 if (! resize_bloc (bloc, MEM_ROUNDUP(size)))
474 return 0; 696 return 0;
475
476 relocate_some_blocs (bloc->next, bloc->data + size);
477
478 /* Zero out the new space in the bloc, to help catch bugs faster. */
479 bzero (bloc->data + bloc->size, size - bloc->size);
480
481 /* Indicate that this block has a new size. */
482 bloc->size = size;
483 697
484 return *ptr; 698 return *ptr;
485 } 699 }
486 700
487 /* Disable relocations, after making room for at least SIZE bytes 701 /* Disable relocations, after making room for at least SIZE bytes
517 /* Initialize various things for memory allocation. */ 731 /* Initialize various things for memory allocation. */
518 732
519 static void 733 static void
520 r_alloc_init () 734 r_alloc_init ()
521 { 735 {
736 static struct heap heap_base;
737 POINTER end;
738
522 if (r_alloc_initialized) 739 if (r_alloc_initialized)
523 return; 740 return;
524 741
525 r_alloc_initialized = 1; 742 r_alloc_initialized = 1;
526 real_morecore = __morecore; 743 real_morecore = __morecore;
527 __morecore = r_alloc_sbrk; 744 __morecore = r_alloc_sbrk;
528 745
529 virtual_break_value = break_value = (*real_morecore) (0); 746 first_heap = last_heap = &heap_base;
747 first_heap->next = first_heap->prev = NIL_HEAP;
748 first_heap->start = first_heap->bloc_start
749 = virtual_break_value = break_value = (*real_morecore) (0);
530 if (break_value == NIL) 750 if (break_value == NIL)
531 abort (); 751 abort ();
532 752
533 page_size = PAGE; 753 page_size = PAGE;
534 extra_bytes = ROUNDUP (50000); 754 extra_bytes = ROUNDUP (50000);
535 755
536 page_break_value = (POINTER) ROUNDUP (break_value); 756 first_heap->end = (POINTER) ROUNDUP (first_heap->start);
537 757
538 /* The extra call to real_morecore guarantees that the end of the 758 /* The extra call to real_morecore guarantees that the end of the
539 address space is a multiple of page_size, even if page_size is 759 address space is a multiple of page_size, even if page_size is
540 not really the page size of the system running the binary in 760 not really the page size of the system running the binary in
541 which page_size is stored. This allows a binary to be built on a 761 which page_size is stored. This allows a binary to be built on a
542 system with one page size and run on a system with a smaller page 762 system with one page size and run on a system with a smaller page
543 size. */ 763 size. */
544 (*real_morecore) (page_break_value - break_value); 764 (*real_morecore) (first_heap->end - first_heap->start);
545 765
546 /* Clear the rest of the last page; this memory is in our address space 766 /* Clear the rest of the last page; this memory is in our address space
547 even though it is after the sbrk value. */ 767 even though it is after the sbrk value. */
548 /* Doubly true, with the additional call that explicitly adds the 768 /* Doubly true, with the additional call that explicitly adds the
549 rest of that page to the address space. */ 769 rest of that page to the address space. */
550 bzero (break_value, (page_break_value - break_value)); 770 bzero (first_heap->start, first_heap->end - first_heap->start);
551 virtual_break_value = break_value = page_break_value; 771 virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
552 use_relocatable_buffers = 1; 772 use_relocatable_buffers = 1;
553 } 773 }
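For context on why multiple heaps are needed at all (cf. the commit message): GNU malloc obtains core through the __morecore hook, and r_alloc_init swaps r_alloc_sbrk into that hook while remembering the real allocator. If some other part of the C library calls sbrk directly, bypassing the hook, the next chunk real_morecore returns is no longer contiguous with the previous heap, and obtain() then starts a fresh heap. A minimal sketch of the swap (declaration shapes as used in this file; not a drop-in replacement):

/* GNU malloc asks for more core through this hook. */
extern POINTER (*__morecore) ();

static POINTER (*real_morecore) ();     /* the system allocator */

static void
install_r_alloc_hook (void)             /* what r_alloc_init does, in essence */
{
  real_morecore = __morecore;           /* remember how to really get core */
  __morecore = r_alloc_sbrk;            /* malloc's requests now go through
                                           ralloc, which can slide the blocs
                                           out of the way first */
}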
774 #ifdef DEBUG
775 #include <assert.h>
776
777 int
778 r_alloc_check ()
779 {
780 int found = 0;
781 heap_ptr h, ph = 0;
782 bloc_ptr b, pb = 0;
783
784 if (!r_alloc_initialized)
785 return;
786
787 assert(first_heap);
788 assert(last_heap->end <= (POINTER) sbrk(0));
789 assert((POINTER) first_heap < first_heap->start);
790 assert(first_heap->start <= virtual_break_value);
791 assert(virtual_break_value <= first_heap->end);
792
793 for (h = first_heap; h; h = h->next)
794 {
795 assert(h->prev == ph);
796 assert((POINTER) ROUNDUP(h->end) == h->end);
797 assert((POINTER) MEM_ROUNDUP(h->start) == h->start);
798 assert((POINTER) MEM_ROUNDUP(h->bloc_start) == h->bloc_start);
799 assert(h->start <= h->bloc_start && h->bloc_start <= h->end);
800
801 if (ph)
802 {
803 assert (ph->end < h->start);
804 assert (h->start <= (POINTER)h && (POINTER)(h+1) <= h->bloc_start);
805 }
806
807 if (h->bloc_start <= break_value && break_value <= h->end)
808 found = 1;
809
810 ph = h;
811 }
812
813 assert(found);
814 assert(last_heap == ph);
815
816 for (b = first_bloc; b; b = b->next)
817 {
818 assert(b->prev == pb);
819 assert((POINTER) MEM_ROUNDUP(b->data) == b->data);
820 assert((SIZE) MEM_ROUNDUP(b->size) == b->size);
821
822 ph = 0;
823 for (h = first_heap; h; h = h->next)
824 {
825 if (h->bloc_start <= b->data && b->data + b->size <= h->end)
826 break;
827 ph = h;
828 }
829
830 assert(h);
831
832 if (pb && pb->data + pb->size != b->data)
833 {
834 assert(ph && b->data == h->bloc_start);
835 while (ph)
836 {
837 if (ph->bloc_start <= pb->data
838 && pb->data + pb->size <= ph->end)
839 {
840 assert(pb->data + pb->size + b->size > ph->end);
841 break;
842 }
843 else
844 {
845 assert(ph->bloc_start + b->size > ph->end);
846 }
847 ph = ph->prev;
848 }
849 }
850 pb = b;
851 }
852
853 assert(last_bloc == pb);
854
855 if (last_bloc)
856 assert(last_bloc->data + last_bloc->size == break_value);
857 else
858 assert(first_heap->bloc_start == break_value);
859 }
860 #endif /* DEBUG */