115
|
1 /* Block-relocating memory allocator.
|
|
2 Copyright (C) 1990 Free Software Foundation, Inc.
|
|
3
|
|
4 This file is part of GNU Emacs.
|
|
5
|
|
6 GNU Emacs is free software; you can redistribute it and/or modify
|
|
7 it under the terms of the GNU General Public License as published by
|
|
8 the Free Software Foundation; either version 1, or (at your option)
|
|
9 any later version.
|
|
10
|
|
11 GNU Emacs is distributed in the hope that it will be useful,
|
|
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
14 GNU General Public License for more details.
|
|
15
|
|
16 You should have received a copy of the GNU General Public License
|
|
17 along with GNU Emacs; see the file COPYING. If not, write to
|
|
18 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
|
|
19
|
|
20 /* This package works by allocating blocks from a zone of memory
|
|
21 above that used by malloc (). When malloc needs more space that
|
|
22 would enter our zone, we relocate blocks upward. The bottom of
|
|
23 our zone is kept in the variable `virtual_break_value'. The top
|
|
24 of our zone is indicated by `real_break_value'.
|
|
25
|
|
26 As blocks are freed, a free list is maintained and we attempt
|
|
27 to satisfy further requests for space using a first-fit policy.
|
|
28 If there are holes, but none fit, memory is compacted and a new
|
|
29 block is obtained at the top of the zone.
|
|
30
|
|
31 NOTE that our blocks are always rounded to page boundaries. */
|
|
32
|
|
33 /*
|
|
34 NOTES:
|
|
35
|
|
36 Once this is stable, I can speed things up by initially leaving a large
|
|
37 gap between real_break_value and true_break_value, or maybe making
|
|
38 a large hole before the first block.
|
|
39
|
|
40 If we also kept track of size_wanted, we could gain some
|
|
41 extra space upon compactification.
|
|
42
|
|
43 Perhaps we should just note a hole when malloc does sbrk(-n)?
|
|
44
|
|
45 Relocating downward upon freeing the first block would simplify
|
|
46 other things.
|
|
47
|
|
48 When r_alloc places a block in a hole, we could easily check if there's
|
|
49 much more than required, and leave a hole.
|
|
50 */
|
|
51
|
|
52 #include "mem_limits.h"
|
|
53
|
|
54 static POINTER r_alloc_sbrk ();
|
|
55 static POINTER sbrk ();
|
|
56 static POINTER brk ();
|
|
57
|
|
58 /* Variable `malloc' uses for the function which gets more space
|
|
59 from the system. */
|
|
60 extern POINTER (*__morecore) ();
|
|
61
|
|
62 /* List of variables which point into the associated data block. */
|
|
struct other_pointer
{
  POINTER *location;		/* Address of a user variable that points
				   into the associated data block.  */
  struct other_pointer *next;	/* Next such variable for the same block,
				   or null at the end of the chain.  */
};
|
|
68
|
|
69 /* List describing all the user's pointers to relocatable blocks. */
|
|
typedef struct rel_pointers
{
  struct rel_pointers *next;	/* Next record; the list is kept in
				   increasing order of `block'.  */
  struct rel_pointers *prev;	/* Previous record; null at the head.  */
  struct other_pointer *others; /* Other variables which use this block. */
  POINTER *location;		/* Location of the block's pointer. */
  POINTER block;		/* Address of the actual data. */
  int size;			/* The size of the block. */
} relocatable_pointer;
|
|
79
|
|
80 #define REL_NIL ((struct rel_pointers *) 0)
|
|
81
|
|
82 static relocatable_pointer *pointer_list;
|
|
83 static relocatable_pointer *last_pointer;
|
|
84
|
|
85 #define MAX_HOLES 2
|
|
86
|
|
87 /* Vector of available holes among allocated blocks. This can include
|
|
88 a hole at the beginning of the list, but never the end. */
|
|
typedef struct
{
  POINTER address;		/* First byte of the hole.  */
  unsigned int size;		/* Length of the hole, in bytes.  */
} hole_descriptor;
|
|
94
|
|
static hole_descriptor r_alloc_holes[MAX_HOLES];

/* Number of holes currently available. */
static int holes;

/* The process break value (i.e., curbrk): the top of the zone used
   by relocatable blocks.  */
static POINTER real_break_value;

/* The REAL (i.e., page aligned) break value: where the system break
   actually is.  Invariant: true_break_value >= real_break_value; the
   difference is slack space already obtained from the system.  */
static POINTER true_break_value;

/* Address of start of data space in use by relocatable blocks.
   This is what `malloc' thinks is the process break value. */
static POINTER virtual_break_value;

/* Nonzero if we have told `malloc' to start using `r_alloc_sbrk'
   instead of calling `sbrk' directly. */
int r_alloc_in_use;

/* Size of a system page in bytes.  Assumed to be a power of two.  */
#define PAGE (getpagesize ())
/* Nonzero if ADDR falls on a page boundary.
   NOTE(review): casts the pointer to unsigned int -- assumes pointers
   fit in an unsigned int, true for the machines this targeted.  */
#define ALIGNED(addr) (((unsigned int) (addr) & (PAGE - 1)) == 0)
/* Round SIZE up to a whole number of pages.  Note that an
   already-aligned SIZE rounds up to the NEXT page; per the comment on
   get_more_space, integral-page requests deliberately prefetch an
   extra page.  */
#define ROUNDUP(size) (((unsigned int) (size) + PAGE) & ~(PAGE - 1))
|
|
117
|
|
118 /*
|
|
119 Level number of warnings already issued.
|
|
120 0 -- no warnings issued.
|
|
121 1 -- 75% warning already issued.
|
|
122 2 -- 85% warning already issued.
|
|
123 */
|
|
124 static int warnlevel;
|
|
125
|
|
126 /* Function to call to issue a warning;
|
|
127 0 means don't issue them. */
|
|
128 static void (*warnfunction) ();
|
|
129
|
|
130 /* Call this to start things off. It determines the current process
|
|
131 break value, as well as the `true' break value--because the system
|
|
132 allocates memory in page increments, if the break value is not page
|
|
133 aligned it means that space up to the next page boundary is actually
|
|
134 available. */
|
|
135
|
|
136 void
|
|
137 malloc_init (start, warn_func)
|
|
138 POINTER start;
|
|
139 void (*warn_func) ();
|
|
140 {
|
|
141 r_alloc_in_use = 1;
|
|
142 __morecore = r_alloc_sbrk;
|
|
143
|
|
144 virtual_break_value = real_break_value = sbrk (0);
|
|
145 if (ALIGNED (real_break_value))
|
|
146 true_break_value = real_break_value;
|
|
147 else
|
|
148 true_break_value = (POINTER) ROUNDUP (real_break_value);
|
|
149
|
|
150 if (start)
|
|
151 data_space_start = start;
|
|
152 lim_data = 0;
|
|
153 warnlevel = 0;
|
|
154 warnfunction = warn_func;
|
|
155
|
|
156 get_lim_data ();
|
|
157 }
|
|
158
|
|
159 /* Get more space for us to use. Return a pointer to SIZE more
|
|
160 bytes of space. SIZE is internally rounded up to a page boundary,
|
|
161 and requests for integral pages prefetch an extra page. */
|
|
162
|
|
163 static POINTER
|
|
164 get_more_space (size)
|
|
165 unsigned int size;
|
|
166 {
|
|
167 unsigned int margin = true_break_value - real_break_value;
|
|
168 unsigned int get;
|
|
169 POINTER old_break = real_break_value;
|
|
170
|
|
171 if (size == 0)
|
|
172 return real_break_value;
|
|
173
|
|
174 if (size <= margin)
|
|
175 {
|
|
176 real_break_value += size;
|
|
177 return old_break;
|
|
178 }
|
|
179
|
|
180 get = ROUNDUP (size - margin);
|
|
181 if (sbrk (get) < (POINTER) 0)
|
|
182 return NULL;
|
|
183
|
|
184 true_break_value += get;
|
|
185 real_break_value = (old_break + size);
|
|
186
|
|
187 return old_break;
|
|
188 }
|
|
189
|
|
190 /* Relinquish size bytes of space to the system. Space is only returned
|
|
191 in page increments. If successful, return real_break_value. */
|
|
192
|
|
193 static POINTER
|
|
194 return_space (size)
|
|
195 unsigned int size;
|
|
196 {
|
|
197 unsigned int margin = (true_break_value - real_break_value) + size;
|
|
198 unsigned int to_return = (margin / PAGE) * PAGE;
|
|
199 unsigned new_margin = margin % PAGE;
|
|
200
|
|
201 true_break_value -= to_return;
|
|
202 if (! brk (true_break_value))
|
|
203 return NULL;
|
|
204
|
|
205 real_break_value = true_break_value - new_margin;
|
|
206 return real_break_value;
|
|
207 }
|
|
208
|
|
209 /* Record a new hole in memory beginning at ADDRESS of size SIZE.
|
|
210 Holes are ordered by location. Adjacent holes are merged.
|
|
211 Holes are zero filled before being noted. */
|
|
212
|
|
/* Record a new hole in memory beginning at ADDRESS of size SIZE.
   Holes are ordered by location in r_alloc_holes.  A new hole is
   merged with an adjacent preceding and/or following hole when
   possible; otherwise it is inserted (or appended) in order.
   NOTE(review): when no merge applies and holes == MAX_HOLES, the
   insert/append paths write r_alloc_holes[MAX_HOLES] -- callers
   appear to compactify at MAX_HOLES, but confirm no overflow path.
   The caller is expected to have zero-filled the hole already.  */

static void
note_hole (address, size)
     POINTER address;
     int size;
{
  register int this_hole = holes - 1;	/* Start at the last hole. */
  register POINTER end = address + size; /* End of the hole. */
  register int i;

  if (holes)
    {
      /* Find the hole which should precede this new one. */
      while (this_hole >= 0 && r_alloc_holes[this_hole].address > address)
	this_hole--;

      /* Can we merge with preceding? */
      if (this_hole >= 0
	  && r_alloc_holes[this_hole].address + r_alloc_holes[this_hole].size
	     == address)
	{
	  /* Extend the preceding hole to cover the new space.  */
	  r_alloc_holes[this_hole].size += size;

	  if (this_hole == holes - 1)
	    return;

	  /* Can we also merge with following?  If so the two holes
	     collapse into one and the vector is compacted.  */
	  if (end == r_alloc_holes[this_hole + 1].address)
	    {
	      r_alloc_holes[this_hole].size
		+= r_alloc_holes[this_hole + 1].size;

	      for (i = this_hole + 1; i < holes - 1; i++)
		r_alloc_holes[i] = r_alloc_holes[i + 1];
	      holes--;
	    }

	  return;
	}

      if (this_hole < holes - 1)	/* there are following holes */
	{
	  register int next_hole = this_hole + 1;

	  /* Can we merge with the next hole? */
	  if (end == r_alloc_holes[next_hole].address)
	    {
	      r_alloc_holes[next_hole].address = address;
	      r_alloc_holes[next_hole].size += size;
	      return;
	    }

	  /* Can't merge, so insert: shift later holes up one slot. */
	  for (i = holes; i > next_hole; i--)
	    r_alloc_holes[i] = r_alloc_holes[i - 1];
	  r_alloc_holes[next_hole].address = address;
	  r_alloc_holes[next_hole].size = size;
	  holes++;

	  return;
	}
      else	/* Simply add this hole at the end. */
	{
	  r_alloc_holes[holes].address = address;
	  r_alloc_holes[holes].size = size;
	  holes++;

	  return;
	}

      /* NOTE(review): unreachable -- both branches above return.  */
      abort ();
    }
  else	/* Make the first hole. */
    {
      holes = 1;
      r_alloc_holes[0].address = address;
      r_alloc_holes[0].size = size;
    }
}
|
|
291
|
|
292 /* Mark hole HOLE as no longer available by re-organizing the vector.
|
|
293 HOLE is the Nth hole, beginning with 0. This doesn *not* affect memory
|
|
294 organization. */
|
|
295
|
|
296 static void
|
|
297 delete_hole (hole)
|
|
298 int hole;
|
|
299 {
|
|
300 register int i;
|
|
301
|
|
302 for (i = hole; i < holes - 1; i++)
|
|
303 r_alloc_holes[i] = r_alloc_holes[i + 1];
|
|
304
|
|
305 holes--;
|
|
306 }
|
|
307
|
|
308 /* Insert a newly allocated pointer, NEW_PTR, at the appropriate
|
|
309 place in our list. */
|
|
310
|
|
311 static void
|
|
312 insert (new_ptr)
|
|
313 register relocatable_pointer *new_ptr;
|
|
314 {
|
|
315 register relocatable_pointer *this_ptr = pointer_list;
|
|
316
|
|
317 while (this_ptr != REL_NIL && this_ptr->block < new_ptr->block)
|
|
318 this_ptr = this_ptr->next;
|
|
319
|
|
320 if (this_ptr == REL_NIL)
|
|
321 abort (); /* Use `attach' for appending. */
|
|
322
|
|
323 new_ptr->next = this_ptr;
|
|
324 new_ptr->prev = this_ptr->prev;
|
|
325 this_ptr->prev = new_ptr;
|
|
326
|
|
327 if (this_ptr == pointer_list)
|
|
328 pointer_list = new_ptr;
|
|
329 else
|
|
330 new_ptr->prev->next = new_ptr;
|
|
331 }
|
|
332
|
|
333 /* Attach a newly allocated pointer, NEW_PTR, to the end of our list. */
|
|
334
|
|
335 static void
|
|
336 attach (new_ptr)
|
|
337 relocatable_pointer *new_ptr;
|
|
338 {
|
|
339 if (pointer_list == REL_NIL)
|
|
340 {
|
|
341 pointer_list = new_ptr;
|
|
342 last_pointer = new_ptr;
|
|
343 new_ptr->next = new_ptr->prev = REL_NIL;
|
|
344 }
|
|
345 else
|
|
346 {
|
|
347 new_ptr->next = REL_NIL;
|
|
348 last_pointer->next = new_ptr;
|
|
349 new_ptr->prev = last_pointer;
|
|
350 last_pointer = new_ptr;
|
|
351 }
|
|
352 }
|
|
353
|
|
354 static relocatable_pointer *
|
|
355 find_block (block)
|
|
356 POINTER block;
|
|
357 {
|
|
358 register relocatable_pointer *this_ptr = pointer_list;
|
|
359
|
|
360 while (this_ptr != REL_NIL && this_ptr->block != block)
|
|
361 this_ptr = this_ptr->next;
|
|
362
|
|
363 return this_ptr;
|
|
364 }
|
|
365
|
|
366 static relocatable_pointer *
|
|
367 find_location (address)
|
|
368 POINTER *address;
|
|
369 {
|
|
370 register relocatable_pointer *this_ptr = pointer_list;
|
|
371
|
|
372 while (this_ptr != REL_NIL && this_ptr->location != address)
|
|
373 {
|
|
374 struct other_pointer *op = this_ptr->others;
|
|
375
|
|
376 while (op != (struct other_pointer *) 0)
|
|
377 {
|
|
378 if (op->location == address)
|
|
379 return this_ptr;
|
|
380
|
|
381 op = op->next;
|
|
382 }
|
|
383
|
|
384 this_ptr = this_ptr->next;
|
|
385 }
|
|
386
|
|
387 return this_ptr;
|
|
388 }
|
|
389
|
|
390
|
|
391 static void compactify ();
|
|
392
|
|
393 /* Record of last new block allocated. */
|
|
394 static relocatable_pointer *last_record;
|
|
395
|
|
396 /* Allocate a block of size SIZE and record that PTR points to it.
|
|
397 If successful, store the address of the block in *PTR and return
|
|
398 it as well. Otherwise return NULL. */
|
|
399
|
|
/* Allocate a block of size SIZE and record that PTR points to it.
   First-fit search of the hole list; a hole bigger than the
   page-rounded size is split, an exact-enough hole is consumed
   whole (so the block's recorded size may exceed SIZE).  If no hole
   fits, memory is compacted and the block is taken at the top of the
   zone.  If successful, store the block's address in *PTR and return
   it as well; otherwise return NULL.  */

POINTER
r_alloc (ptr, size)
     POINTER *ptr;
     int size;
{
  register relocatable_pointer *record
    = (relocatable_pointer *) malloc (sizeof (relocatable_pointer));
  register POINTER block;

  /* If we can't get space to record this pointer, fail. */
  if (record == 0)
    return NULL;

  /* Remember the newest record; r_re_alloc uses this to transfer
     `other' pointers to the replacement block.  */
  last_record = record;

  if (holes)			/* Search for a hole the right size. */
    {
      int i;

      for (i = 0; i < holes; i++)
	if (r_alloc_holes[i].size >= size)
	  {
	    record->location = ptr;
	    record->others = (struct other_pointer *) 0;
	    record->block = *ptr = r_alloc_holes[i].address;
	    if (r_alloc_holes[i].size > ROUNDUP (size))
	      {
		/* Hole is larger than needed: take the front,
		   shrink the hole.  */
		record->size = ROUNDUP (size);
		r_alloc_holes[i].size -= ROUNDUP (size);
		r_alloc_holes[i].address += ROUNDUP (size);
	      }
	    else
	      {
		/* Consume the whole hole.  */
		record->size = r_alloc_holes[i].size;
		delete_hole (i);
	      }
	    insert (record);

	    *ptr = record->block;
	    return record->block;
	  }

      /* No holes large enough.  Burp. */
      compactify ();
    }

  /* No holes: grow the process. */
  block = get_more_space (size);
  if (block == NULL)
    {
      free (record);
      return NULL;
    }

  /* Return the address of the block. */
  *ptr = block;

  /* Record and append this pointer to our list. */
  record->location = ptr;
  record->others = (struct other_pointer *) 0;
  record->block = block;
  record->size = size;
  attach (record);

  return block;
}
|
|
466
|
|
467 /* Declare VAR to be a pointer which points into the block of r_alloc'd
|
|
468 memory at BLOCK.
|
|
469
|
|
470 If VAR is already declared for this block, simply return.
|
|
471 If VAR currently points to some other block, remove that declaration
|
|
472 of it, then install the new one.
|
|
473
|
|
474 Return 0 if successful, -1 otherwise. */
|
|
475
|
|
/* Declare VAR to be a pointer which points into the r_alloc'd block
   at BLOCK.  If VAR is already declared for this block, return 0
   immediately.  If VAR is currently declared for some other block,
   remove that declaration first.  If BLOCK has been relocated since
   the caller took its address, *VAR is adjusted by the displacement.
   Return 0 if successful, -1 if no memory for the record.
   Aborts if BLOCK is not a recorded block, or if VAR is some block's
   *main* pointer rather than an `other'.  */

int
r_alloc_declare (var, block)
     POINTER *var;
     register POINTER block;
{
  register relocatable_pointer *block_ptr = find_block (block);
  relocatable_pointer *var_ptr = find_location (var);
  register struct other_pointer *other;

  if (block_ptr == REL_NIL)
    abort ();

  if (var_ptr != REL_NIL)	/* Var already declared somewhere. */
    {
      register struct other_pointer *po;

      if (var_ptr == block_ptr)	/* Var already points to this block. */
	return 0;

      /* Unlink VAR from the old block's `others' chain.
	 PO trails one element behind OTHER during the scan.  */
      po = (struct other_pointer *) 0;
      other = var_ptr->others;
      while (other && other->location != var)
	{
	  po = other;
	  other = other->next;
	}

      if (!other)		/* This only happens if the location is */
	abort ();		/* the main pointer and not an `other' */

      if (po)			/* In the chain */
	{
	  po->next = other->next;
	  free (other);
	}
      else			/* Only element of the chain */
	{
	  /* NOTE(review): if the first element matched but had
	     successors, this frees only the head and drops the rest
	     of the chain (leak + lost declarations) -- confirm
	     whether var_ptr->others = other->next was intended.  */
	  free (var_ptr->others);
	  var_ptr->others = (struct other_pointer *) 0;
	}
    }

  /* Install this variable as an `other' element */

  other = (struct other_pointer *) malloc (sizeof (struct other_pointer));

  if (other == 0)
    return -1;

  /* If the malloc relocated this data block, adjust this variable. */
  if (block != block_ptr->block)
    {
      int offset = block_ptr->block - block;

      *var += offset;
    }

  other->location = var;
  other->next = (struct other_pointer *) 0;

  /* Append to the tail of the block's `others' chain.  */
  if (block_ptr->others == (struct other_pointer *) 0)
    block_ptr->others = other;
  else
    {
      register struct other_pointer *op = block_ptr->others;

      while (op->next != (struct other_pointer *) 0)
	op = op->next;
      op->next = other;
    }

  return 0;
}
|
|
549
|
|
550 /* Recursively free the linked list of `other' pointers to a block. */
|
|
551
|
|
552 static void
|
|
553 free_others (another)
|
|
554 struct other_pointer *another;
|
|
555 {
|
|
556 if (another == (struct other_pointer *) 0)
|
|
557 return;
|
|
558
|
|
559 free_others (another->next);
|
|
560 free (another);
|
|
561 }
|
|
562
|
|
563 /* Remove the element pointed to by PTR from the doubly linked list.
|
|
564 Record the newly freed space in `holes', unless it was at the end,
|
|
565 in which case return that space to the system. Return 0 if successful,
|
|
566 -1 otherwise. */
|
|
567
|
|
568 int
|
|
569 r_alloc_free (ptr)
|
|
570 register POINTER *ptr;
|
|
571 {
|
|
572 register relocatable_pointer *this_ptr = find_block (*ptr);
|
|
573
|
|
574 if (this_ptr == REL_NIL)
|
|
575 return -1;
|
|
576 else
|
|
577 {
|
|
578 register relocatable_pointer *prev = this_ptr->prev;
|
|
579 register relocatable_pointer *next = this_ptr->next;
|
|
580 if (next && prev) /* Somewhere in the middle */
|
|
581 {
|
|
582 next->prev = prev;
|
|
583 prev->next = next;
|
|
584 }
|
|
585 else if (prev) /* Last block */
|
|
586 {
|
|
587 prev->next = REL_NIL;
|
|
588 last_pointer = prev;
|
|
589 return_space (this_ptr->size);
|
|
590 free_others (this_ptr->others);
|
|
591 free (this_ptr);
|
|
592
|
|
593 return 0;
|
|
594 }
|
|
595 else if (next) /* First block */
|
|
596 {
|
|
597 next->prev = REL_NIL;
|
|
598 pointer_list = next;
|
|
599 }
|
|
600 else if (this_ptr = pointer_list) /* ONLY block */
|
|
601 {
|
|
602 pointer_list = REL_NIL;
|
|
603 last_pointer = REL_NIL;
|
|
604 if (holes) /* A hole precedes this block. */
|
|
605 {
|
|
606 holes = 0;
|
|
607 return_space (real_break_value - virtual_break_value);
|
|
608 }
|
|
609 else
|
|
610 return_space (this_ptr->size);
|
|
611
|
|
612 if (real_break_value != virtual_break_value)
|
|
613 abort ();
|
|
614
|
|
615 free_others (this_ptr->others);
|
|
616 free (this_ptr);
|
|
617 /* Turn off r_alloc_in_use? */
|
|
618
|
|
619 return 0;
|
|
620 }
|
|
621 else
|
|
622 abort (); /* Weird shit */
|
|
623
|
|
624 free_others (this_ptr->others);
|
|
625 free (this_ptr);
|
|
626 bzero (this_ptr->block, this_ptr->size);
|
|
627 note_hole (this_ptr->block, this_ptr->size);
|
|
628
|
|
629 if (holes == MAX_HOLES)
|
|
630 compactify ();
|
|
631 }
|
|
632
|
|
633 return 0;
|
|
634 }
|
|
635
|
|
636 /* Change the size of the block pointed to by the thing in PTR.
|
|
637 If necessary, r_alloc a new block and copy the data there.
|
|
638 Return a pointer to the block if successful, NULL otherwise.
|
|
639
|
|
640 Note that if the size requested is less than the actual block size,
|
|
641 nothing is done and the pointer is simply returned. */
|
|
642
|
|
643 POINTER
|
|
644 r_re_alloc (ptr, size)
|
|
645 POINTER *ptr;
|
|
646 int size;
|
|
647 {
|
|
648 register relocatable_pointer *this_ptr = find_block (*ptr);
|
|
649 POINTER block;
|
|
650
|
|
651 if (! this_ptr)
|
|
652 return NULL;
|
|
653
|
|
654 if (this_ptr->size >= size) /* Already have enough space. */
|
|
655 return *ptr;
|
|
656
|
|
657 /* Here we could try relocating the blocks just above... */
|
|
658 block = r_alloc (ptr, size);
|
|
659 if (block)
|
|
660 {
|
|
661 bcopy (this_ptr->block, block, this_ptr->size);
|
|
662 if (this_ptr->others)
|
|
663 last_record->others = this_ptr->others;
|
|
664
|
|
665 if (! r_alloc_free (this_ptr->block))
|
|
666 abort ();
|
|
667
|
|
668 *ptr = block;
|
|
669 return block;
|
|
670 }
|
|
671
|
|
672 return NULL;
|
|
673 }
|
|
674
|
|
675
|
|
676 /* Move and relocate all blocks from FIRST_PTR to LAST_PTR, inclusive,
|
|
677 downwards to space starting at ADDRESS. */
|
|
678
|
|
679 static int
|
|
680 move_blocks_downward (first_ptr, last_ptr, address)
|
|
681 relocatable_pointer *first_ptr, *last_ptr;
|
|
682 POINTER address;
|
|
683 {
|
|
684 int size = (last_ptr->block + last_ptr->size) - first_ptr->block;
|
|
685 register relocatable_pointer *this_ptr = first_ptr;
|
|
686 register offset = first_ptr->block - address;
|
|
687 register struct other_pointer *op;
|
|
688
|
|
689 /* Move all the data. */
|
|
690 bcopy (first_ptr->block, address, size);
|
|
691
|
|
692 /* Now relocate all the pointers to those blocks. */
|
|
693 while (1)
|
|
694 {
|
|
695 this_ptr->block -= offset;
|
|
696 *this_ptr->location = this_ptr->block;
|
|
697
|
|
698 op = this_ptr->others;
|
|
699 while (op != (struct other_pointer *) 0)
|
|
700 {
|
|
701 *op->location -= offset;
|
|
702 op = op->next;
|
|
703 }
|
|
704
|
|
705 if (this_ptr == last_ptr)
|
|
706 return;
|
|
707 else
|
|
708 this_ptr = this_ptr->next;
|
|
709 }
|
|
710
|
|
711 return size;
|
|
712 }
|
|
713
|
|
714 /* Burp our memory zone. */
|
|
715
|
|
/* Burp our memory zone: squeeze out every hole by sliding the blocks
   above each hole downward, then return the total regained space to
   the system.  Relies on move_blocks_downward returning the number
   of bytes it moved.  */

static void
compactify ()
{
  register relocatable_pointer *this_ptr = pointer_list;
  relocatable_pointer *first_to_move;
  register relocatable_pointer *last_to_move;
  hole_descriptor *this_hole = &r_alloc_holes[0];
  register hole_descriptor *next_hole;
  register POINTER end;		/* First address after hole */
  unsigned int space_regained = 0;

  while (holes)			/* While there are holes */
    {
      /* Find the first block after this hole. */
      end = this_hole->address + this_hole->size;
      while (this_ptr && this_ptr->block != end)
	this_ptr = this_ptr->next;

      if (! this_ptr)
	abort ();

      /* NOTE(review): on the final hole, next_hole points one past
	 the live descriptors, so the comparison below reads a stale
	 or uninitialized address -- confirm the intended behavior is
	 "move everything to the end of the list".  */
      next_hole = this_hole + 1;
      last_to_move = first_to_move = this_ptr;
      this_ptr = this_ptr->next;

      /* Note all blocks located before the next hole. */
      while (this_ptr && this_ptr->block < next_hole->address)
	{
	  last_to_move = this_ptr;
	  this_ptr = this_ptr->next;
	}
      space_regained +=
	move_blocks_downward (first_to_move, last_to_move, this_hole->address);

      holes--;
      this_hole = next_hole;
    }

  return_space (space_regained);
}
|
|
756
|
|
757 /* Relocate the list elements from the beginning of the list up to and
|
|
758 including UP_TO_THIS_PTR to the area beginning at FREE_SPACE, which is
|
|
759 after all current blocks.
|
|
760
|
|
761 First copy all the data, then adjust the pointers and reorganize
|
|
762 the list. NOTE that this *only* works for contiguous blocks. */
|
|
763
|
|
/* Relocate the list elements from the beginning of the list up to and
   including UP_TO_THIS_PTR to the area beginning at FREE_SPACE, which
   is after all current blocks.  First copy all the data, then adjust
   the registered pointers, then rotate the list so the moved elements
   become its tail.  NOTE that this *only* works for contiguous
   blocks.  Return the number of bytes relocated.  */

static unsigned int
relocate_to_end (up_to_this_ptr, free_space)
     register relocatable_pointer *up_to_this_ptr;
     POINTER free_space;
{
  register relocatable_pointer *this_ptr;
  POINTER block_start = pointer_list->block;
  POINTER block_end = up_to_this_ptr->block + up_to_this_ptr->size;
  unsigned int total_size = block_end - block_start;
  unsigned int offset = (int) (free_space - block_start);

  /* Move the whole contiguous span upward at once.  */
  bcopy (block_start, free_space, total_size);

  /* Walk backwards from the last moved element, displacing each
     block address and every user pointer into it.  */
  for (this_ptr = up_to_this_ptr; this_ptr; this_ptr = this_ptr->prev)
    {
      struct other_pointer *op = this_ptr->others;

      *this_ptr->location += offset;
      this_ptr->block += offset;

      while (op != (struct other_pointer *) 0)
	{
	  *op->location += offset;
	  op = op->next;
	}
    }

  /* Connect the head to the tail. */
  last_pointer->next = pointer_list;
  pointer_list->prev = last_pointer;

  /* Disconnect */
  up_to_this_ptr->next->prev = REL_NIL;
  pointer_list = up_to_this_ptr->next;
  up_to_this_ptr->next = REL_NIL;
  last_pointer = up_to_this_ptr;

  return total_size;		/* of space relocated. */
}
|
|
802
|
|
803 /* Relocate the list elements from FROM_THIS_PTR to (and including)
|
|
804 the last to the zone beginning at FREE_SPACE, which is located
|
|
805 before any blocks.
|
|
806
|
|
807 First copy all the data, then adjust the pointers and reorganize
|
|
808 the list. NOTE that this *only* works for contiguous blocks. */
|
|
809
|
|
/* Relocate the list elements from FROM_THIS_PTR to (and including)
   the last to the zone beginning at FREE_SPACE, which is located
   before any blocks.  First copy all the data, then adjust the
   registered pointers, then rotate the list so the moved elements
   become its head.  NOTE that this *only* works for contiguous
   blocks.  Return the number of bytes moved.  */

static unsigned int
relocate_to_beginning (from_this_ptr, free_space)
     register relocatable_pointer *from_this_ptr;
     POINTER free_space;
{
  POINTER block_start = from_this_ptr->block;
  POINTER block_end = last_pointer->block + last_pointer->size;
  unsigned int total_size = (int) (block_end - block_start);
  unsigned int offset = (int) (from_this_ptr->block - free_space);
  register relocatable_pointer *this_ptr;

  /* Move the whole contiguous span downward at once.  */
  bcopy (block_start, free_space, total_size);

  /* Displace each moved block's address and every user pointer.  */
  for (this_ptr = from_this_ptr; this_ptr; this_ptr = this_ptr->next)
    {
      struct other_pointer *op = this_ptr->others;

      *this_ptr->location -= offset;
      this_ptr->block -= offset;

      while (op != (struct other_pointer *) 0)
	{
	  *op->location -= offset;
	  op = op->next;
	}
    }

  /* Connect the end to the beginning. */
  last_pointer->next = pointer_list;
  pointer_list->prev = last_pointer;

  /* Disconnect and reset first and last. */
  from_this_ptr->prev->next = REL_NIL;
  last_pointer = from_this_ptr->prev;
  pointer_list = from_this_ptr;
  pointer_list->prev = REL_NIL;

  return total_size;		/* of space moved. */
}
|
|
848
|
|
849 /* Relocate any blocks necessary, either upwards or downwards,
|
|
850 to obtain a space of SIZE bytes. Assumes we have at least one block. */
|
|
851
|
|
/* Relocate any blocks necessary, either upwards or downwards, to
   obtain a free zone of SIZE bytes at the bottom (SIZE > 0: move
   enough leading blocks to the end) or release space (SIZE < 0: move
   trailing blocks down to the beginning and return pages to the
   system).  Assumes we have at least one block.  Returns the number
   of bytes actually relocated, or 0 on failure.  */

static unsigned int
relocate (size)
     register int size;
{
  register relocatable_pointer *ptr;
  register int got = 0;

  if (size > 0)			/* Up: Relocate enough blocs to get SIZE. */
    {
      register POINTER new_space;

      /* Accumulate leading blocks until they cover SIZE bytes.
	 NOTE(review): PTR ends up one element past the last block
	 counted (the for-increment runs before the exit test), so an
	 extra block may be relocated -- confirm intended.  */
      for (ptr = pointer_list; got < size && ptr; ptr = ptr->next)
	got += ptr->size;

      if (ptr == REL_NIL)
	ptr = last_pointer;

      new_space = get_more_space (size);
      if (!new_space)
	return 0;

      return (relocate_to_end (ptr, pointer_list->block + size));
    }

  if (size < 0)			/* Down: relocate as many blocs as will
				   fit in SIZE bytes of space. */
    {
      register POINTER to_zone;
      unsigned int moved;

      /* Walk backwards, subtracting sizes until we have consumed at
	 least -SIZE bytes (GOT goes negative toward SIZE).  */
      for (ptr = last_pointer; got >= size && ptr; ptr = ptr->prev)
	got -= ptr->size;

      if (ptr == REL_NIL)
	ptr = pointer_list;
      else
	{
	  /* Back off one block to be <= size */
	  got += ptr->size;
	  ptr = ptr->next;
	}

      if (got >= size)
	{
	  to_zone = virtual_break_value - size + got;
	  moved = relocate_to_beginning (ptr, to_zone);
	  if (moved)
	    return_space (moved);

	  return moved;
	}

      return 0;
    }

  /* SIZE == 0 is the caller's error.  */
  abort ();
}
|
|
909
|
|
910 /* This function encapsulates `sbrk' to preserve the relocatable blocks.
|
|
911 It is called just like `sbrk'. When relocatable blocks are in use,
|
|
912 `malloc' must use this function instead of `sbrk'. */
|
|
913
|
|
914 POINTER
|
|
915 r_alloc_sbrk (size)
|
|
916 unsigned int size;
|
|
917 {
|
|
918 POINTER new_zone; /* Start of the zone we will return. */
|
|
919
|
|
920 #if 0
|
|
921 if (! r_alloc_in_use)
|
|
922 return (POINTER) sbrk (size);
|
|
923 #endif
|
|
924
|
|
925 if (size == 0)
|
|
926 return virtual_break_value;
|
|
927
|
|
928 if (size > 0) /* Get more space */
|
|
929 {
|
|
930 register unsigned int space;
|
|
931
|
|
932 if (pointer_list == REL_NIL)
|
|
933 {
|
|
934 POINTER space = get_more_space (size);
|
|
935
|
|
936 virtual_break_value = real_break_value;
|
|
937 return space;
|
|
938 }
|
|
939
|
|
940 new_zone = virtual_break_value;
|
|
941
|
|
942 /* Check if there is a hole just before the buffer zone. */
|
|
943 if (holes && r_alloc_holes[0].address == virtual_break_value)
|
|
944 {
|
|
945 if (r_alloc_holes[0].size > size)
|
|
946 {
|
|
947 /* Adjust the hole size. */
|
|
948 r_alloc_holes[0].size -= size;
|
|
949 r_alloc_holes[0].address += size;
|
|
950 virtual_break_value += size;
|
|
951
|
|
952 return new_zone;
|
|
953 }
|
|
954
|
|
955 if (r_alloc_holes[0].size == size)
|
|
956 {
|
|
957 virtual_break_value += size;
|
|
958 delete_hole (0);
|
|
959
|
|
960 return new_zone;
|
|
961 }
|
|
962
|
|
963 /* Adjust the size requested by space
|
|
964 already available in this hole. */
|
|
965 size -= r_alloc_holes[0].size;
|
|
966 virtual_break_value += r_alloc_holes[0].size;
|
|
967 delete_hole (0);
|
|
968 }
|
|
969
|
|
970 space = relocate (size);
|
|
971 if (!space)
|
|
972 return (POINTER) -1;
|
|
973
|
|
974 #ifdef REL_ALLOC_SAVE_SPACE
|
|
975 move_blocks_downward
|
|
976 #else
|
|
977 bzero (new_zone, space);
|
|
978 if (space > size)
|
|
979 note_hole (new_zone + size, space - size);
|
|
980 #endif /* REL_ALLOC_SAVE_SPACE */
|
|
981
|
|
982 virtual_break_value += size;
|
|
983 return new_zone;
|
|
984 }
|
|
985 else /* Return space to system */
|
|
986 {
|
|
987 int moved;
|
|
988 int left_over;
|
|
989 POINTER old_break_value;
|
|
990
|
|
991 if (pointer_list == REL_NIL)
|
|
992 {
|
|
993 POINTER space = return_space (-size);
|
|
994 virtual_break_value = real_break_value;
|
|
995
|
|
996 return space;
|
|
997 }
|
|
998
|
|
999 if (holes && r_alloc_holes[0].address == virtual_break_value)
|
|
1000 {
|
|
1001 size -= r_alloc_holes[0].size;
|
|
1002 delete_hole (0);
|
|
1003 }
|
|
1004
|
|
1005 moved = relocate (size);
|
|
1006 old_break_value = virtual_break_value;
|
|
1007
|
|
1008 if (!moved)
|
|
1009 return (POINTER) -1;
|
|
1010
|
|
1011 left_over = moved + size;
|
|
1012 virtual_break_value += size;
|
|
1013
|
|
1014 if (left_over)
|
|
1015 {
|
|
1016 #ifdef REL_ALLOC_SAVE_SPACE
|
|
1017 move_blocks_downward
|
|
1018 #else
|
|
1019 bzero (virtual_break_value, left_over);
|
|
1020 note_hole (virtual_break_value, left_over);
|
|
1021 #endif /* not REL_ALLOC_SAVE_SPACE */
|
|
1022 }
|
|
1023
|
|
1024 return old_break_value;
|
|
1025 }
|
|
1026 }
|
|
1027
|
|
1028 /* For debugging */
|
|
1029
|
|
1030 #include <stdio.h>
|
|
1031
|
|
1032 void
|
|
1033 memory_trace ()
|
|
1034 {
|
|
1035 relocatable_pointer *ptr;
|
|
1036 int i;
|
|
1037
|
|
1038 fprintf (stderr, "virtual: 0x%x\n real: 0x%x\n true: 0x%x\n\n",
|
|
1039 virtual_break_value, real_break_value, true_break_value);
|
|
1040 fprintf (stderr, "Blocks:\n");
|
|
1041 for (ptr = pointer_list; ptr; ptr = ptr->next)
|
|
1042 {
|
|
1043 fprintf (stderr, " address: 0x%x\n", ptr->block);
|
|
1044 fprintf (stderr, " size: 0x%x\n", ptr->size);
|
|
1045 if (ptr->others)
|
|
1046 {
|
|
1047 struct other_pointer *op = ptr->others;
|
|
1048 fprintf (stderr, " others:", ptr->size);
|
|
1049 while (op)
|
|
1050 {
|
|
1051 fprintf (stderr, " 0x%x", op->location);
|
|
1052 op = op->next;
|
|
1053 }
|
|
1054 fprintf (stderr, "\n");
|
|
1055 }
|
|
1056 }
|
|
1057
|
|
1058 if (holes)
|
|
1059 {
|
|
1060 fprintf (stderr, "\nHoles:\n");
|
|
1061 for (i = 0; i < holes; i++)
|
|
1062 {
|
|
1063 fprintf (stderr, " address: 0x%x\n", r_alloc_holes[i].address);
|
|
1064 fprintf (stderr, " size: 0x%x\n", r_alloc_holes[i].size);
|
|
1065 }
|
|
1066 }
|
|
1067
|
|
1068 fprintf (stderr, "\n\n");
|
|
1069 }
|