comparison src/gmalloc.c @ 78051:2c30fdff6890

[HAVE_GTK_AND_PTHREAD] Check this after including config.h.
(_aligned_blocks_mutex) [USE_PTHREAD]: New variable.
(LOCK_ALIGNED_BLOCKS, UNLOCK_ALIGNED_BLOCKS): New macros.
(_free_internal, memalign): Use them.
(_malloc_mutex, _aligned_blocks_mutex) [USE_PTHREAD]: Initialize to
PTHREAD_MUTEX_INITIALIZER.
(malloc_initialize_1) [USE_PTHREAD]: Don't use recursive mutex.
(morecore_nolock): Rename from morecore.  All uses changed.  Use only
nolock versions of internal allocation functions.
(_malloc_internal_nolock, _realloc_internal_nolock)
(_free_internal_nolock): New functions created from _malloc_internal,
_realloc_internal, and _free_internal.
(_malloc_internal, _realloc_internal, _free_internal): Use them.
Copy hook value to automatic variable before its use.
(memalign): Copy hook value to automatic variable before its use.
author YAMAMOTO Mitsuharu <mituharu@math.s.chiba-u.ac.jp>
date Tue, 26 Jun 2007 03:29:05 +0000
parents 2bb28b957639
children 77430fdfce38
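
The core of this change is a simple layering pattern: each internal
allocator entry point is split into a `_nolock' worker plus a thin
wrapper that takes the (non-recursive) `_malloc_mutex', so the workers
can call one another freely while the lock is already held.  A minimal
sketch of that pattern, using hypothetical names rather than the real
gmalloc internals:

    #include <pthread.h>

    static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Worker: assumes table_mutex is already held by the caller, so it
       may call other *_nolock workers without deadlocking.  */
    static int
    table_insert_nolock (int key)
    {
      /* ... mutate shared state here ... */
      return key;
    }

    /* Public entry point: take the lock once, delegate to the worker.  */
    int
    table_insert (int key)
    {
      int result;

      pthread_mutex_lock (&table_mutex);
      result = table_insert_nolock (key);
      pthread_mutex_unlock (&table_mutex);
      return result;
    }
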
--- src/gmalloc.c	78050:74d45d6140ca
+++ src/gmalloc.c	78051:2c30fdff6890
@@ -1,11 +1,8 @@
 /* This file is no longer automatically generated from libc. */
 
 #define _MALLOC_INTERNAL
-#ifdef HAVE_GTK_AND_PTHREAD
-#define USE_PTHREAD
-#endif
 
 /* The malloc headers and source files from the C library follow here. */
 
 /* Declarations for `malloc' and friends.
    Copyright (C) 1990, 1991, 1992, 1993, 1995, 1996, 1999, 2002, 2003, 2004,
@@ -36,10 +33,14 @@
 
 #ifdef _MALLOC_INTERNAL
 
 #ifdef HAVE_CONFIG_H
 #include <config.h>
+#endif
+
+#ifdef HAVE_GTK_AND_PTHREAD
+#define USE_PTHREAD
 #endif
 
 #if ((defined __cplusplus || (defined (__STDC__) && __STDC__) \
      || defined STDC_HEADERS || defined PROTOTYPES) \
     && ! defined (BROKEN_PROTOTYPES))
@@ -233,18 +234,25 @@
    used when these functions need to call each other.
    They are the same but don't call the hooks. */
 extern __ptr_t _malloc_internal PP ((__malloc_size_t __size));
 extern __ptr_t _realloc_internal PP ((__ptr_t __ptr, __malloc_size_t __size));
 extern void _free_internal PP ((__ptr_t __ptr));
+extern __ptr_t _malloc_internal_nolock PP ((__malloc_size_t __size));
+extern __ptr_t _realloc_internal_nolock PP ((__ptr_t __ptr, __malloc_size_t __size));
+extern void _free_internal_nolock PP ((__ptr_t __ptr));
 
 #ifdef USE_PTHREAD
-extern pthread_mutex_t _malloc_mutex;
+extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
 #define LOCK() pthread_mutex_lock (&_malloc_mutex)
 #define UNLOCK() pthread_mutex_unlock (&_malloc_mutex)
+#define LOCK_ALIGNED_BLOCKS() pthread_mutex_lock (&_aligned_blocks_mutex)
+#define UNLOCK_ALIGNED_BLOCKS() pthread_mutex_unlock (&_aligned_blocks_mutex)
 #else
 #define LOCK()
 #define UNLOCK()
+#define LOCK_ALIGNED_BLOCKS()
+#define UNLOCK_ALIGNED_BLOCKS()
 #endif
 
 #endif /* _MALLOC_INTERNAL. */
 
 /* Given an address in the middle of a malloc'd object,
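
Note that every LOCK/UNLOCK pair expands to nothing when USE_PTHREAD is
undefined, so single-threaded builds pay no synchronization cost.  A
compile-time sketch of the same idiom (demo names, not the gmalloc
code):

    #ifdef USE_PTHREAD
    # include <pthread.h>
    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    # define LOCK()   pthread_mutex_lock (&demo_mutex)
    # define UNLOCK() pthread_mutex_unlock (&demo_mutex)
    #else  /* Single-threaded build: the macros vanish entirely.  */
    # define LOCK()
    # define UNLOCK()
    #endif

    static int counter;

    int
    bump (void)
    {
      int v;

      LOCK ();
      v = ++counter;   /* guarded only when USE_PTHREAD is defined */
      UNLOCK ();
      return v;
    }
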
@@ -552,11 +560,12 @@
       _heapinfo[block + blocks].busy.info.size = -blocks;
     }
 
 #ifdef USE_PTHREAD
 static pthread_once_t malloc_init_once_control = PTHREAD_ONCE_INIT;
-pthread_mutex_t _malloc_mutex;
+pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
 #endif
 
 static void
 malloc_initialize_1 ()
 {
@@ -565,11 +574,13 @@
 #endif
 
   if (__malloc_initialize_hook)
     (*__malloc_initialize_hook) ();
 
-#ifdef USE_PTHREAD
+  /* We don't use recursive mutex because pthread_mutexattr_init may
+     call malloc internally. */
+#if 0 /* defined (USE_PTHREAD) */
   {
     pthread_mutexattr_t attr;
 
     pthread_mutexattr_init (&attr);
     pthread_mutexattr_settype (&attr, PTHREAD_MUTEX_RECURSIVE);
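
The `#if 0' above records why the mutex is no longer made recursive at
run time: `pthread_mutexattr_init' may itself call malloc on some
platforms, which is fatal while malloc is still bootstrapping.  A
statically initialized plain mutex needs no function call at all.  A
sketch of the safe shape (hypothetical names):

    #include <pthread.h>

    /* No run-time initialization call, hence no chance of re-entering
       malloc during malloc's own setup.  The price is that the mutex
       is not recursive, which is why the code gains *_nolock variants.  */
    static pthread_mutex_t boot_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int initialized;

    void
    ensure_initialized (void)
    {
      pthread_mutex_lock (&boot_mutex);
      if (!initialized)
        initialized = 1;   /* one-time setup, no allocation here */
      pthread_mutex_unlock (&boot_mutex);
    }
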
@@ -614,13 +625,13 @@
 
 static int morecore_recursing;
 
 /* Get neatly aligned memory, initializing or
    growing the heap info table as necessary. */
-static __ptr_t morecore PP ((__malloc_size_t));
+static __ptr_t morecore_nolock PP ((__malloc_size_t));
 static __ptr_t
-morecore (size)
+morecore_nolock (size)
      __malloc_size_t size;
 {
   __ptr_t result;
   malloc_info *newinfo, *oldinfo;
   __malloc_size_t newsize;
@@ -659,11 +670,11 @@
          extend it in place or relocate it to existing sufficient core,
          we will get called again, and the code above will notice the
          `morecore_recursing' flag and return null. */
       int save = errno;   /* Don't want to clobber errno with ENOMEM. */
       morecore_recursing = 1;
-      newinfo = (malloc_info *) _realloc_internal
+      newinfo = (malloc_info *) _realloc_internal_nolock
         (_heapinfo, newsize * sizeof (malloc_info));
       morecore_recursing = 0;
       if (newinfo == NULL)
         errno = save;
       else
@@ -715,11 +726,11 @@
   register_heapinfo ();
 
   /* Reset _heaplimit so _free_internal never decides
      it can relocate or resize the info table. */
   _heaplimit = 0;
-  _free_internal (oldinfo);
+  _free_internal_nolock (oldinfo);
   PROTECT_MALLOC_STATE (0);
 
   /* The new heap limit includes the new table just allocated. */
   _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
   return result;
@@ -730,11 +741,11 @@
   return result;
 }
 
 /* Allocate memory from the heap. */
 __ptr_t
-_malloc_internal (size)
+_malloc_internal_nolock (size)
      __malloc_size_t size;
 {
   __ptr_t result;
   __malloc_size_t block, blocks, lastblocks, start;
   register __malloc_size_t i;
@@ -750,11 +761,10 @@
 #if 0
   if (size == 0)
     return NULL;
 #endif
 
-  LOCK ();
   PROTECT_MALLOC_STATE (0);
 
   if (size < sizeof (struct list))
     size = sizeof (struct list);
 
@@ -800,12 +810,14 @@
       else
         {
           /* No free fragments of the desired size, so get a new block
              and break it into fragments, returning the first. */
 #ifdef GC_MALLOC_CHECK
-          result = _malloc_internal (BLOCKSIZE);
+          result = _malloc_internal_nolock (BLOCKSIZE);
           PROTECT_MALLOC_STATE (0);
+#elif defined (USE_PTHREAD)
+          result = _malloc_internal_nolock (BLOCKSIZE);
 #else
           result = malloc (BLOCKSIZE);
 #endif
           if (result == NULL)
             {
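
The new `#elif defined (USE_PTHREAD)' branch matters because the caller
already holds `_malloc_mutex': calling the public `malloc' here would
try to lock the same non-recursive mutex again and deadlock the thread.
A compressed illustration (hypothetical names):

    #include <pthread.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void *
    inner_nolock (void)
    {
      return (void *) 0;   /* stand-in for real work under the lock */
    }

    void *
    outer (void)
    {
      void *p;

      pthread_mutex_lock (&m);
      p = inner_nolock ();   /* correct: stays on the nolock path */
      /* Calling a public entry that itself does
         pthread_mutex_lock (&m) here would block forever, since this
         thread already owns m and the mutex is not recursive.  */
      pthread_mutex_unlock (&m);
      return p;
    }
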
@@ -872,11 +884,11 @@
               _heapinfo[block].free.size += (wantblocks - lastblocks);
               _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
               _heaplimit += wantblocks - lastblocks;
               continue;
             }
-          result = morecore (wantblocks * BLOCKSIZE);
+          result = morecore_nolock (wantblocks * BLOCKSIZE);
           if (result == NULL)
             goto out;
           block = BLOCK (result);
           /* Put the new block at the end of the free list. */
           _heapinfo[block].free.size = wantblocks;
@@ -930,22 +942,45 @@
       _heapinfo[block + blocks].busy.info.size = -blocks;
     }
 
   PROTECT_MALLOC_STATE (1);
  out:
+  return result;
+}
+
+__ptr_t
+_malloc_internal (size)
+     __malloc_size_t size;
+{
+  __ptr_t result;
+
+  LOCK ();
+  result = _malloc_internal_nolock (size);
   UNLOCK ();
+
   return result;
 }
 
 __ptr_t
 malloc (size)
      __malloc_size_t size;
 {
+  __ptr_t (*hook) (__malloc_size_t);
+
   if (!__malloc_initialized && !__malloc_initialize ())
     return NULL;
 
-  return (__malloc_hook != NULL ? *__malloc_hook : _malloc_internal) (size);
+  /* Copy the value of __malloc_hook to an automatic variable in case
+     __malloc_hook is modified in another thread between its
+     NULL-check and the use.
+
+     Note: Strictly speaking, this is not a right solution.  We should
+     use mutexes to access non-read-only variables that are shared
+     among multiple threads.  We just leave it for compatibility with
+     glibc malloc (i.e., assignments to __malloc_hook) for now. */
+  hook = __malloc_hook;
+  return (hook != NULL ? *hook : _malloc_internal) (size);
 }
 
 #ifndef _LIBC
 
 /* On some ANSI C systems, some libc functions call _malloc, _free
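
The comment added to `malloc' describes a time-of-check/time-of-use
race: another thread may clear `__malloc_hook' between the NULL test
and the indirect call, so the old one-liner could jump through a null
pointer.  Reading the hook once into a local makes the test and the
call use the same snapshot (still not a full fix, as the comment
admits).  A reduced sketch with hypothetical names:

    typedef void *(*alloc_hook_t) (unsigned long);

    /* Shared variable that other threads may reassign, as glibc
       historically allowed for __malloc_hook.  */
    static volatile alloc_hook_t current_hook;

    static void *
    default_alloc (unsigned long size)
    {
      (void) size;
      return (void *) 0;   /* stand-in for the real allocator */
    }

    void *
    alloc (unsigned long size)
    {
      alloc_hook_t hook = current_hook;   /* single read: one snapshot */

      return hook != NULL ? (*hook) (size) : default_alloc (size);
    }
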
@@ -1022,13 +1057,13 @@
 
 /* List of blocks allocated by memalign. */
 struct alignlist *_aligned_blocks = NULL;
 
 /* Return memory to the heap.
-   Like `free' but don't call a __free_hook if there is one. */
+   Like `_free_internal' but don't lock mutex. */
 void
-_free_internal (ptr)
+_free_internal_nolock (ptr)
      __ptr_t ptr;
 {
   int type;
   __malloc_size_t block, blocks;
   register __malloc_size_t i;
@@ -1041,20 +1076,21 @@
   register struct alignlist *l;
 
   if (ptr == NULL)
     return;
 
-  LOCK ();
   PROTECT_MALLOC_STATE (0);
 
+  LOCK_ALIGNED_BLOCKS ();
   for (l = _aligned_blocks; l != NULL; l = l->next)
     if (l->aligned == ptr)
       {
         l->aligned = NULL;   /* Mark the slot in the list as free. */
         ptr = l->exact;
         break;
       }
+  UNLOCK_ALIGNED_BLOCKS ();
 
   block = BLOCK (ptr);
 
   type = _heapinfo[block].busy.type;
   switch (type)
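
Note the new locking shape here: `_free_internal_nolock' runs with
`_malloc_mutex' already held, and additionally takes
`_aligned_blocks_mutex' just around the list scan that maps an aligned
pointer back to the pointer malloc actually returned.  The scan itself,
in isolation (hypothetical names):

    #include <pthread.h>
    #include <stddef.h>

    struct alignnode { void *aligned, *exact; struct alignnode *next; };

    static struct alignnode *aligned_list;
    static pthread_mutex_t aligned_list_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* If ALIGNED was handed out by a memalign-style allocator, return
       the underlying block pointer and release the list slot;
       otherwise return ALIGNED unchanged.  */
    void *
    lookup_exact (void *aligned)
    {
      struct alignnode *l;
      void *exact = aligned;

      pthread_mutex_lock (&aligned_list_mutex);
      for (l = aligned_list; l != NULL; l = l->next)
        if (l->aligned == aligned)
          {
            l->aligned = NULL;   /* mark the slot in the list as free */
            exact = l->exact;
            break;
          }
      pthread_mutex_unlock (&aligned_list_mutex);
      return exact;
    }
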
@@ -1156,20 +1192,20 @@
           /* Free the old info table, clearing _heaplimit to avoid
              recursion into this code.  We don't want to return the
              table's blocks to the system before we have copied them to
              the new location. */
           _heaplimit = 0;
-          _free_internal (_heapinfo);
+          _free_internal_nolock (_heapinfo);
           _heaplimit = oldlimit;
 
           /* Tell malloc to search from the beginning of the heap for
              free blocks, so it doesn't reuse the ones just freed. */
           _heapindex = 0;
 
           /* Allocate new space for the info table and move its data. */
-          newinfo = (malloc_info *) _malloc_internal (info_blocks
-                                                      * BLOCKSIZE);
+          newinfo = (malloc_info *) _malloc_internal_nolock (info_blocks
+                                                             * BLOCKSIZE);
           PROTECT_MALLOC_STATE (0);
           memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
           _heapinfo = newinfo;
 
           /* We should now have coalesced the free block with the
@@ -1228,12 +1264,12 @@
           ++_chunks_used;
           _bytes_used += BLOCKSIZE;
           _chunks_free -= BLOCKSIZE >> type;
           _bytes_free -= BLOCKSIZE;
 
-#ifdef GC_MALLOC_CHECK
-          _free_internal (ADDRESS (block));
+#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
+          _free_internal_nolock (ADDRESS (block));
 #else
           free (ADDRESS (block));
 #endif
         }
       else if (_heapinfo[block].busy.info.frag.nfree != 0)
@@ -1267,21 +1303,33 @@
         }
       break;
     }
 
   PROTECT_MALLOC_STATE (1);
+}
+
+/* Return memory to the heap.
+   Like `free' but don't call a __free_hook if there is one. */
+void
+_free_internal (ptr)
+     __ptr_t ptr;
+{
+  LOCK ();
+  _free_internal_nolock (ptr);
   UNLOCK ();
 }
 
 /* Return memory to the heap. */
 
 FREE_RETURN_TYPE
 free (ptr)
      __ptr_t ptr;
 {
-  if (__free_hook != NULL)
-    (*__free_hook) (ptr);
+  void (*hook) (__ptr_t) = __free_hook;
+
+  if (hook != NULL)
+    (*hook) (ptr);
   else
     _free_internal (ptr);
 }
 
 /* Define the `cfree' alias for `free'. */
@@ -1413,43 +1461,42 @@
    some benchmarks seem to indicate that greater compactness is
    achieved by unconditionally allocating and copying to a
    new region. This module has incestuous knowledge of the
    internals of both free and malloc. */
 __ptr_t
-_realloc_internal (ptr, size)
+_realloc_internal_nolock (ptr, size)
      __ptr_t ptr;
      __malloc_size_t size;
 {
   __ptr_t result;
   int type;
   __malloc_size_t block, blocks, oldlimit;
 
   if (size == 0)
     {
-      _free_internal (ptr);
-      return _malloc_internal (0);
+      _free_internal_nolock (ptr);
+      return _malloc_internal_nolock (0);
     }
   else if (ptr == NULL)
-    return _malloc_internal (size);
+    return _malloc_internal_nolock (size);
 
   block = BLOCK (ptr);
 
-  LOCK ();
   PROTECT_MALLOC_STATE (0);
 
   type = _heapinfo[block].busy.type;
   switch (type)
     {
     case 0:
       /* Maybe reallocate a large block to a small fragment. */
       if (size <= BLOCKSIZE / 2)
         {
-          result = _malloc_internal (size);
+          result = _malloc_internal_nolock (size);
           if (result != NULL)
             {
               memcpy (result, ptr, size);
-              _free_internal (ptr);
+              _free_internal_nolock (ptr);
               goto out;
             }
         }
 
       /* The new size is a large allocation as well;
@@ -1465,11 +1512,11 @@
           _heapinfo[block].busy.info.size = blocks;
           /* We have just created a new chunk by splitting a chunk in two.
              Now we will free this chunk; increment the statistics counter
              so it doesn't become wrong when _free_internal decrements it. */
           ++_chunks_used;
-          _free_internal (ADDRESS (block + blocks));
+          _free_internal_nolock (ADDRESS (block + blocks));
           result = ptr;
         }
       else if (blocks == _heapinfo[block].busy.info.size)
         /* No size change necessary. */
         result = ptr;
@@ -1480,28 +1527,28 @@
              adjacent free space to grow without moving. */
           blocks = _heapinfo[block].busy.info.size;
           /* Prevent free from actually returning memory to the system. */
           oldlimit = _heaplimit;
           _heaplimit = 0;
-          _free_internal (ptr);
-          result = _malloc_internal (size);
+          _free_internal_nolock (ptr);
+          result = _malloc_internal_nolock (size);
           PROTECT_MALLOC_STATE (0);
           if (_heaplimit == 0)
             _heaplimit = oldlimit;
           if (result == NULL)
             {
               /* Now we're really in trouble.  We have to unfree
                  the thing we just freed.  Unfortunately it might
                  have been coalesced with its neighbors. */
               if (_heapindex == block)
-                (void) _malloc_internal (blocks * BLOCKSIZE);
+                (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
               else
                 {
                   __ptr_t previous
-                    = _malloc_internal ((block - _heapindex) * BLOCKSIZE);
-                  (void) _malloc_internal (blocks * BLOCKSIZE);
-                  _free_internal (previous);
+                    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
+                  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
+                  _free_internal_nolock (previous);
                 }
               goto out;
             }
           if (ptr != result)
             memmove (result, ptr, blocks * BLOCKSIZE);
@@ -1517,35 +1564,50 @@
         result = ptr;
       else
         {
           /* The new size is different; allocate a new space,
              and copy the lesser of the new size and the old. */
-          result = _malloc_internal (size);
+          result = _malloc_internal_nolock (size);
           if (result == NULL)
             goto out;
           memcpy (result, ptr, min (size, (__malloc_size_t) 1 << type));
-          _free_internal (ptr);
+          _free_internal_nolock (ptr);
         }
       break;
     }
 
   PROTECT_MALLOC_STATE (1);
  out:
+  return result;
+}
+
+__ptr_t
+_realloc_internal (ptr, size)
+     __ptr_t ptr;
+     __malloc_size_t size;
+{
+  __ptr_t result;
+
+  LOCK();
+  result = _realloc_internal_nolock (ptr, size);
   UNLOCK ();
+
   return result;
 }
 
 __ptr_t
 realloc (ptr, size)
      __ptr_t ptr;
      __malloc_size_t size;
 {
+  __ptr_t (*hook) (__ptr_t, __malloc_size_t);
+
   if (!__malloc_initialized && !__malloc_initialize ())
     return NULL;
 
-  return (__realloc_hook != NULL ? *__realloc_hook : _realloc_internal)
-    (ptr, size);
+  hook = __realloc_hook;
+  return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
 }
 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
 
    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License as
@@ -1679,13 +1741,14 @@
      __malloc_size_t alignment;
      __malloc_size_t size;
 {
   __ptr_t result;
   unsigned long int adj, lastadj;
-
-  if (__memalign_hook)
-    return (*__memalign_hook) (alignment, size);
+  __ptr_t (*hook) (__malloc_size_t, __malloc_size_t) = __memalign_hook;
+
+  if (hook)
+    return (*hook) (alignment, size);
 
   /* Allocate a block with enough extra space to pad the block with up to
      (ALIGNMENT - 1) bytes if necessary. */
   result = malloc (size + alignment - 1);
   if (result == NULL)
@@ -1716,27 +1779,35 @@
       /* Record this block in the list of aligned blocks, so that `free'
          can identify the pointer it is passed, which will be in the middle
          of an allocated block. */
 
       struct alignlist *l;
+      LOCK_ALIGNED_BLOCKS ();
       for (l = _aligned_blocks; l != NULL; l = l->next)
         if (l->aligned == NULL)
           /* This slot is free. Use it. */
           break;
       if (l == NULL)
         {
           l = (struct alignlist *) malloc (sizeof (struct alignlist));
-          if (l == NULL)
+          if (l != NULL)
             {
-              free (result);
-              return NULL;
+              l->next = _aligned_blocks;
+              _aligned_blocks = l;
             }
-          l->next = _aligned_blocks;
-          _aligned_blocks = l;
+        }
+      if (l != NULL)
+        {
+          l->exact = result;
+          result = l->aligned = (char *) result + alignment - adj;
         }
-      l->exact = result;
-      result = l->aligned = (char *) result + alignment - adj;
+      UNLOCK_ALIGNED_BLOCKS ();
+      if (l == NULL)
+        {
+          free (result);
+          result = NULL;
+        }
     }
 
   return result;
 }
 
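
For context, all of this bookkeeping exists because `free' receives the
aligned pointer, not the one `malloc' returned; the `_aligned_blocks'
list maps one back to the other.  A small usage sketch of that contract
(assuming a platform that declares `memalign', e.g. glibc via
<malloc.h>):

    #include <stdio.h>
    #include <stdlib.h>
    #include <malloc.h>   /* declares memalign on glibc */

    int
    main (void)
    {
      /* Request 100 bytes aligned to a 64-byte boundary; the pointer
         returned may sit in the middle of a larger malloc'd block.  */
      void *p = memalign (64, 100);

      if (p != NULL)
        {
          printf ("aligned pointer: %p\n", p);
          free (p);   /* free finds the real block via _aligned_blocks */
        }
      return 0;
    }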