This source file includes following definitions.
- protect_malloc_state
- align
- get_contiguous_space
- register_heapinfo
- malloc_atfork_handler_prepare
- malloc_atfork_handler_parent
- malloc_atfork_handler_child
- malloc_enable_thread
- malloc_initialize_1
- __malloc_initialize
- morecore_nolock
- _malloc_internal_nolock
- _malloc_internal
- malloc
- _malloc
- _free
- _realloc
- _free_internal_nolock
- _free_internal
- free
- weak_alias
- _realloc_internal_nolock
- _realloc_internal
- realloc
- calloc
- __default_morecore
- aligned_alloc
- memalign
- posix_memalign
- valloc
- hybrid_malloc
- hybrid_calloc
- hybrid_free
- hybrid_aligned_alloc
- hybrid_realloc
- hybrid_get_current_dir_name
- checkhdr
- freehook
- mallochook
- reallochook
- mabort
- mcheck
- mprobe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 #include <config.h>
23
24 #if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
25 #define USE_PTHREAD
26 #endif
27
28 #include <string.h>
29 #include <limits.h>
30 #include <stdint.h>
31
32 #ifdef HYBRID_GET_CURRENT_DIR_NAME
33 #undef get_current_dir_name
34 #endif
35
36 #include <unistd.h>
37
38 #ifdef USE_PTHREAD
39 #include <pthread.h>
40 #endif
41
42 #ifdef WINDOWSNT
43 #include <w32heap.h>
44 #endif
45
46 #ifdef emacs
47 extern void emacs_abort (void);
48 #endif
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63 #ifdef HYBRID_MALLOC
64 #undef malloc
65 #undef realloc
66 #undef calloc
67 #undef free
68 #define malloc gmalloc
69 #define realloc grealloc
70 #define calloc gcalloc
71 #define aligned_alloc galigned_alloc
72 #define free gfree
73 #endif
74
75 #ifdef CYGWIN
76 extern void *bss_sbrk (ptrdiff_t size);
77 extern int bss_sbrk_did_unexec;
78 extern char bss_sbrk_buffer[];
79 extern void *bss_sbrk_buffer_end;
80 #define DUMPED bss_sbrk_did_unexec
81 #define ALLOCATED_BEFORE_DUMPING(P) \
82 ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
83 #endif
84
85 #ifdef __cplusplus
86 extern "C"
87 {
88 #endif
89
90 #include <stddef.h>
91
92
93
94 extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
95
96
97 extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
98
99 extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
100
101 extern void free (void *ptr);
102
103
104 #ifdef MSDOS
105 extern void *aligned_alloc (size_t, size_t);
106 extern void *memalign (size_t, size_t);
107 extern int posix_memalign (void **, size_t, size_t);
108 #endif
109
110 #ifdef USE_PTHREAD
111
112 extern void malloc_enable_thread (void);
113 #endif
114
115 #ifdef emacs
116 extern void emacs_abort (void);
117 #endif
118
119
120
121
122
123
124 #define INT_BIT (CHAR_BIT * sizeof (int))
125 #define BLOCKLOG (INT_BIT > 16 ? 12 : 9)
126 #define BLOCKSIZE (1 << BLOCKLOG)
127 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
128
129
130
131 #define HEAP (INT_BIT > 16 ? 4194304 : 65536)
132
133
134
135 #define FINAL_FREE_BLOCKS 8
136
137
138 typedef union
139 {
140
141 struct
142 {
143
144
145 int type;
146 union
147 {
148 struct
149 {
150 size_t nfree;
151 size_t first;
152 } frag;
153
154
155
156 ptrdiff_t size;
157 } info;
158 } busy;
159
160
161 struct
162 {
163 size_t size;
164 size_t next;
165 size_t prev;
166 } free;
167 } malloc_info;
168
169
170 extern char *_heapbase;
171
172
173 extern malloc_info *_heapinfo;
174
175
176 #define BLOCK(A) (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
177 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
178
179
180 extern size_t _heapindex;
181
182
183 extern size_t _heaplimit;
184
185
186 struct list
187 {
188 struct list *next;
189 struct list *prev;
190 };
191
192
193 extern struct list _fraghead[];
194
195
196 struct alignlist
197 {
198 struct alignlist *next;
199 void *aligned;
200 void *exact;
201 };
202 extern struct alignlist *_aligned_blocks;
203
204
205 extern size_t _chunks_used;
206 extern size_t _bytes_used;
207 extern size_t _chunks_free;
208 extern size_t _bytes_free;
209
210
211
212
213 extern void *_malloc_internal (size_t);
214 extern void *_realloc_internal (void *, size_t);
215 extern void _free_internal (void *);
216 extern void *_malloc_internal_nolock (size_t);
217 extern void *_realloc_internal_nolock (void *, size_t);
218 extern void _free_internal_nolock (void *);
219
220 #ifdef USE_PTHREAD
221 extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
222 extern int _malloc_thread_enabled_p;
223 #define LOCK() \
224 do { \
225 if (_malloc_thread_enabled_p) \
226 pthread_mutex_lock (&_malloc_mutex); \
227 } while (0)
228 #define UNLOCK() \
229 do { \
230 if (_malloc_thread_enabled_p) \
231 pthread_mutex_unlock (&_malloc_mutex); \
232 } while (0)
233 #define LOCK_ALIGNED_BLOCKS() \
234 do { \
235 if (_malloc_thread_enabled_p) \
236 pthread_mutex_lock (&_aligned_blocks_mutex); \
237 } while (0)
238 #define UNLOCK_ALIGNED_BLOCKS() \
239 do { \
240 if (_malloc_thread_enabled_p) \
241 pthread_mutex_unlock (&_aligned_blocks_mutex); \
242 } while (0)
243 #else
244 #define LOCK()
245 #define UNLOCK()
246 #define LOCK_ALIGNED_BLOCKS()
247 #define UNLOCK_ALIGNED_BLOCKS()
248 #endif
249
250
251
252 extern void *malloc_find_object_address (void *ptr);
253
254
255
256 extern void *(*__morecore) (ptrdiff_t size);
257
258
259 extern void *__default_morecore (ptrdiff_t size);
260
261
262
263 extern void (*__after_morecore_hook) (void);
264
265
266
267 extern size_t __malloc_extra_blocks;
268
269
270 extern int __malloc_initialized;
271
272 extern int __malloc_initialize (void);
273
274
275 extern void (*__malloc_initialize_hook) (void);
276 extern void (*__free_hook) (void *ptr);
277 extern void *(*__malloc_hook) (size_t size);
278 extern void *(*__realloc_hook) (void *ptr, size_t size);
279 extern void *(*__memalign_hook) (size_t size, size_t alignment);
280
281
282
283 enum mcheck_status
284 {
285 MCHECK_DISABLED = -1,
286 MCHECK_OK,
287 MCHECK_FREE,
288 MCHECK_HEAD,
289 MCHECK_TAIL
290 };
291
292
293
294
295
296 extern int mcheck (void (*abortfunc) (enum mcheck_status));
297
298
299
300
301 extern enum mcheck_status mprobe (void *ptr);
302
303
304 extern void mtrace (void);
305 extern void muntrace (void);
306
307
308 struct mstats
309 {
310 size_t bytes_total;
311 size_t chunks_used;
312 size_t bytes_used;
313 size_t chunks_free;
314 size_t bytes_free;
315 };
316
317
318 extern struct mstats mstats (void);
319
320
321 extern void memory_warnings (void *start, void (*warnfun) (const char *));
322
323 #ifdef __cplusplus
324 }
325 #endif
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347 #include <errno.h>
348
349 void *(*__morecore) (ptrdiff_t size) = __default_morecore;
350
351
352 void *(*__malloc_hook) (size_t size);
353
354
355 char *_heapbase;
356
357
358 malloc_info *_heapinfo;
359
360
361 static size_t heapsize;
362
363
364 size_t _heapindex;
365
366
367 size_t _heaplimit;
368
369
370 struct list _fraghead[BLOCKLOG];
371
372
373 size_t _chunks_used;
374 size_t _bytes_used;
375 size_t _chunks_free;
376 size_t _bytes_free;
377
378
379 int __malloc_initialized;
380
381 size_t __malloc_extra_blocks;
382
383 void (*__malloc_initialize_hook) (void);
384 void (*__after_morecore_hook) (void);
385
386 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
387
388
389
390
391
392
393
394
395
396
397 #include <sys/types.h>
398 #include <sys/mman.h>
399
400 static int state_protected_p;
401 static size_t last_state_size;
402 static malloc_info *last_heapinfo;
403
void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated since the last call while the old
     table was still protected, unprotect the old region first so that
     stale read-only pages do not linger.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  /* Remember the current table location and size for the next call.  */
  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      /* PROTECT_P nonzero means make the bookkeeping read-only so stray
         writes into malloc state fault immediately.  */
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}
425
426 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)
427
428 #else
429 #define PROTECT_MALLOC_STATE(PROT)
430 #endif
431
432
433
/* Obtain SIZE bytes of core via __morecore, rounded up so the returned
   address is aligned to BLOCKSIZE.  Returns NULL on failure (including
   when SIZE exceeds PTRDIFF_MAX, which __morecore cannot represent).  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* __morecore takes a ptrdiff_t; refuse requests it cannot express.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  /* If the break is not block-aligned, extend it by the shortfall and
     advance RESULT to the next BLOCKSIZE boundary.  NOTE(review): the
     second __morecore call's result is not checked; if it fails, the
     returned region is ADJ bytes short — longstanding behavior here.  */
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
461
462
463
464
465 static void *
466 get_contiguous_space (ptrdiff_t size, void *position)
467 {
468 void *before;
469 void *after;
470
471 before = (*__morecore) (0);
472
473
474 if (before != position)
475 return 0;
476
477
478 after = (*__morecore) (size);
479 if (!after)
480 return 0;
481
482
483 if (after != position)
484 {
485 (*__morecore) (- size);
486 return 0;
487 }
488
489 return after;
490 }
491
492
493
494
495
496 static void
497 register_heapinfo (void)
498 {
499 size_t block, blocks;
500
501 block = BLOCK (_heapinfo);
502 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
503
504
505 _bytes_used += blocks * BLOCKSIZE;
506 ++_chunks_used;
507
508
509 _heapinfo[block].busy.type = 0;
510 _heapinfo[block].busy.info.size = blocks;
511
512 while (--blocks > 0)
513 _heapinfo[block + blocks].busy.info.size = -blocks;
514 }
515
516 #ifdef USE_PTHREAD
517 pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
518 pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
519 int _malloc_thread_enabled_p;
520
/* pthread_atfork prepare handler: take both malloc locks (in a fixed
   order) so the child inherits them in a consistent state.  */
static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}
527
/* pthread_atfork parent handler: release the locks taken in prepare,
   in reverse order.  */
static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
534
/* pthread_atfork child handler: release the locks the child inherited
   from the forking thread, in reverse order.  */
static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}
541
542
/* Switch the allocator into thread-safe mode.  Idempotent: a second
   call is a no-op.  Must be called before other threads use malloc.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Re-initialize the mutexes explicitly: static initialization may
     not be sufficient in every environment (e.g. after dumping).  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
		  malloc_atfork_handler_parent,
		  malloc_atfork_handler_child);
  /* Set the flag last so the LOCK macros stay no-ops until the
     mutexes above are ready.  */
  _malloc_thread_enabled_p = 1;
}
560 #endif
561
/* One-time heap setup: allocate and zero the initial block-descriptor
   table via align (), and seed the free-list bookkeeping.  On success
   sets __malloc_initialized to 1; on failure simply returns, leaving
   it 0 so __malloc_initialize reports failure.  */
static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  /* Descriptor 0 is the free-list header; an empty ring points at
     itself.  */
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  /* The table sits at the bottom of the heap; all BLOCK/ADDRESS
     arithmetic is relative to _heapbase.  */
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  /* Account for the table itself as a busy region.  */
  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}
589
590
591
592
/* Public initialization entry point.  Returns nonzero iff this call
   performed the initialization successfully.  Note the quirk: when the
   allocator is ALREADY initialized this returns 0, not 1 — callers
   (malloc/realloc below) only invoke it when not yet initialized.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}
603
604 static int morecore_recursing;
605
606
607
/* Get SIZE bytes of new core, growing the block-descriptor table first
   if the new space would lie beyond what the table can describe.
   Caller must hold the malloc lock.  Returns NULL on failure.  */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* We are being called from the table-growing _realloc below;
       refuse so the recursion bottoms out.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Does the new core extend past what the descriptor table covers?  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Double the table size until it can describe the new top.  */
      newsize = heapsize;
      do
	newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* Once the heap is up and running, prefer growing the table with
	 realloc so it can reuse or extend the existing one.  The
	 morecore_recursing flag makes any nested core request fail,
	 forcing realloc to work within the space it already has.  */
      if (_heaplimit != 0)
	{
	  int save = errno;	/* realloc failure here is not an error
				   for our caller.  */
	  morecore_recursing = 1;
	  newinfo = _realloc_internal_nolock (_heapinfo,
					      newsize * sizeof (malloc_info));
	  morecore_recursing = 0;
	  if (newinfo == NULL)
	    errno = save;
	  else
	    {
	      /* Zero the fresh part of the table.  */
	      memset (&newinfo[heapsize], 0,
		      (newsize - heapsize) * sizeof (malloc_info));
	      _heapinfo = newinfo;
	      heapsize = newsize;
	      goto got_heap;
	    }
	}

      /* Fall back: allocate a brand-new table from fresh core.  Keep
	 doubling if the new table itself would not fit below NEWSIZE
	 blocks.  */
      while (1)
	{
	  newinfo = align (newsize * sizeof (malloc_info));

	  if (newinfo == NULL)
	    {
	      /* Give back the core we got for the original request.  */
	      (*__morecore) (-size);
	      return NULL;
	    }

	  /* Table fits within the blocks it can describe — done.  */
	  if ((size_t) BLOCK ((char *) newinfo
			      + newsize * sizeof (malloc_info))
	      < newsize)
	    break;

	  /* Too small; return this core and try a bigger table.  */
	  (*__morecore) (- newsize * sizeof (malloc_info));
	  newsize *= 2;
	}

      /* Copy the old table into the new one and install it.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
	      (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Free the old table.  Zero _heaplimit first so the free cannot
	 itself try to return core or recurse into table growth.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new top of the heap is just past the new table.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}
719
720
/* Core allocator.  Caller must hold the malloc lock.  Requests of at
   most BLOCKSIZE/2 are served from power-of-two fragment lists; larger
   requests get whole blocks from the free-block ring rooted at
   descriptor 0.  Returns NULL on failure.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* Historically size==0 returned NULL; disabled so that malloc (0)
     returns a unique, freeable pointer (see the bump to
     sizeof (struct list) below).  */
#if 0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  /* Every chunk must be able to hold free-list links when freed.  */
  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Small request: use a fragment of a block.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* LOG = ceil(log2 (size)); fragments come in 1<<LOG sizes.  */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
	++log;

      /* Grab the first free fragment of this size, if any.  */
      next = _fraghead[log].next;
      if (next != NULL)
	{
	  /* Unlink it and update the block's fragment bookkeeping.  */
	  result = next;
	  next->prev->next = next->next;
	  if (next->next != NULL)
	    next->next->prev = next->prev;
	  block = BLOCK (result);
	  if (--_heapinfo[block].busy.info.frag.nfree != 0)
	    _heapinfo[block].busy.info.frag.first =
	      (uintptr_t) next->next % BLOCKSIZE >> log;

	  /* Update statistics.  */
	  ++_chunks_used;
	  _bytes_used += 1 << log;
	  --_chunks_free;
	  _bytes_free -= 1 << log;
	}
      else
	{
	  /* No free fragments: carve a whole new block into
	     BLOCKSIZE >> LOG fragments.  Recurse without taking the
	     lock again in the locking configurations.  */
#ifdef GC_MALLOC_CHECK
	  result = _malloc_internal_nolock (BLOCKSIZE);
	  PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
	  result = _malloc_internal_nolock (BLOCKSIZE);
#else
	  result = malloc (BLOCKSIZE);
#endif
	  if (result == NULL)
	    {
	      PROTECT_MALLOC_STATE (1);
	      goto out;
	    }

	  /* Link all but the first fragment into the free list; the
	     first fragment is the one we return.  */
	  next = (struct list *) ((char *) result + (1 << log));
	  next->next = NULL;
	  next->prev = &_fraghead[log];
	  _fraghead[log].next = next;

	  for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
	    {
	      next = (struct list *) ((char *) result + (i << log));
	      next->next = _fraghead[log].next;
	      next->prev = &_fraghead[log];
	      next->prev->next = next;
	      next->next->prev = next;
	    }

	  /* Mark the block as holding fragments of size 1<<LOG.  */
	  block = BLOCK (result);
	  _heapinfo[block].busy.type = log;
	  _heapinfo[block].busy.info.frag.nfree = i - 1;
	  _heapinfo[block].busy.info.frag.first = i - 1;

	  _chunks_free += (BLOCKSIZE >> log) - 1;
	  _bytes_free += BLOCKSIZE - (1 << log);
	  _bytes_used -= BLOCKSIZE - (1 << log);
	}
    }
  else
    {
      /* Large request: search the circular free-block list, starting
	 at _heapindex (roughly where we last succeeded).  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
	{
	  block = _heapinfo[block].free.next;
	  if (block == start)
	    {
	      /* Went full circle: need more core.  Ask for some slack
		 (__malloc_extra_blocks) beyond the request.  */
	      size_t wantblocks = blocks + __malloc_extra_blocks;
	      block = _heapinfo[0].free.prev;
	      lastblocks = _heapinfo[block].free.size;

	      /* If the last free block touches the heap top, try to
		 extend it in place with contiguous core.  */
	      if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
		  /* We can't do this if the table would need growing.  */
		  block + wantblocks <= heapsize &&
		  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
					ADDRESS (block + lastblocks)))
		{
		  /* Grow the final free block and retry the search.  */
		  block = _heapinfo[0].free.prev;
		  _heapinfo[block].free.size += (wantblocks - lastblocks);
		  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
		  _heaplimit += wantblocks - lastblocks;
		  continue;
		}
	      result = morecore_nolock (wantblocks * BLOCKSIZE);
	      if (result == NULL)
		goto out;
	      block = BLOCK (result);
	      /* Put the new core at the end of the free ring.  */
	      _heapinfo[block].free.size = wantblocks;
	      _heapinfo[block].free.prev = _heapinfo[0].free.prev;
	      _heapinfo[block].free.next = 0;
	      _heapinfo[0].free.prev = block;
	      _heapinfo[_heapinfo[block].free.prev].free.next = block;
	      ++_chunks_free;
	      /* Loop again to use the new space.  */
	    }
	}

      /* Found (or made) a big-enough free region at BLOCK.  */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
	{
	  /* Split: the tail stays free, replacing BLOCK in the ring.  */
	  _heapinfo[block + blocks].free.size
	    = _heapinfo[block].free.size - blocks;
	  _heapinfo[block + blocks].free.next
	    = _heapinfo[block].free.next;
	  _heapinfo[block + blocks].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapindex = block + blocks;
	}
      else
	{
	  /* Exact fit: unlink the region from the ring.  */
	  _heapinfo[_heapinfo[block].free.next].free.prev
	    = _heapinfo[block].free.prev;
	  _heapinfo[_heapinfo[block].free.prev].free.next
	    = _heapindex = _heapinfo[block].free.next;
	  --_chunks_free;
	}

      /* Mark the run busy: first descriptor holds the length, the
	 rest hold negated offsets back to the first.  */
      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      while (--blocks > 0)
	_heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
918
/* Locked wrapper around the core allocator.  */
void *
_malloc_internal (size_t size)
{
  void *value;

  LOCK ();
  value = _malloc_internal_nolock (size);
  UNLOCK ();

  return value;
}
930
931 void *
932 malloc (size_t size)
933 {
934 void *(*hook) (size_t);
935
936 if (!__malloc_initialized && !__malloc_initialize ())
937 return NULL;
938
939
940
941
942
943
944
945
946
947 hook = __malloc_hook;
948 return (hook != NULL ? *hook : _malloc_internal) (size);
949 }
950
951 #ifndef _LIBC
952
953
954
955
956 extern void *_malloc (size_t);
957 extern void _free (void *);
958 extern void *_realloc (void *, size_t);
959
/* Compatibility shim: forward to malloc.  */
void *
_malloc (size_t size)
{
  return malloc (size);
}
965
/* Compatibility shim: forward to free.  */
void
_free (void *ptr)
{
  free (ptr);
}
971
/* Compatibility shim: forward to realloc.  */
void *
_realloc (void *ptr, size_t size)
{
  return realloc (ptr, size);
}
977
978 #endif
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001 void (*__free_hook) (void *__ptr);
1002
1003
1004 struct alignlist *_aligned_blocks = NULL;
1005
1006
1007
/* Core deallocator.  Caller must hold the malloc lock.  Returns memory
   to the free ring (coalescing with neighbors) or to a fragment list,
   and may give core back to the system when the top of the heap holds
   enough free space.  */
void
_free_internal_nolock (void *ptr)
{
  int type;
  size_t block, blocks;
  register size_t i;
  struct list *prev, *next;
  void *curbrk;
  /* Keep at least this many free blocks at the heap top rather than
     returning them to the system.  */
  const size_t lesscore_threshold
    = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;

  register struct alignlist *l;

  if (ptr == NULL)
    return;

  PROTECT_MALLOC_STATE (0);

  /* If PTR is an aligned alias created by aligned_alloc, translate it
     back to the exact pointer malloc returned.  */
  LOCK_ALIGNED_BLOCKS ();
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == ptr)
      {
	l->aligned = NULL;	/* Slot is now reusable.  */
	ptr = l->exact;
	break;
      }
  UNLOCK_ALIGNED_BLOCKS ();

  block = BLOCK (ptr);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* Whole-block region.  */
      --_chunks_used;
      _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
      _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;

      /* Walk the free ring to find I, the free region just below
	 BLOCK, so the ring stays sorted by address.  */
      i = _heapindex;
      if (i > block)
	while (i > block)
	  i = _heapinfo[i].free.prev;
      else
	{
	  do
	    i = _heapinfo[i].free.next;
	  while (i > 0 && i < block);
	  i = _heapinfo[i].free.prev;
	}

      /* Coalesce with the previous free region if adjacent.  */
      if (block == i + _heapinfo[i].free.size)
	{
	  _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
	  block = i;
	}
      else
	{
	  /* Otherwise insert BLOCK after I in the ring.  */
	  _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
	  _heapinfo[block].free.next = _heapinfo[i].free.next;
	  _heapinfo[block].free.prev = i;
	  _heapinfo[i].free.next = block;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  ++_chunks_free;
	}

      /* Coalesce with the following free region if adjacent.  */
      if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
	{
	  _heapinfo[block].free.size
	    += _heapinfo[_heapinfo[block].free.next].free.size;
	  _heapinfo[block].free.next
	    = _heapinfo[_heapinfo[block].free.next].free.next;
	  _heapinfo[_heapinfo[block].free.next].free.prev = block;
	  --_chunks_free;
	}

      blocks = _heapinfo[block].free.size;

      /* Only try to return core if nothing else has moved the break
	 since we last did.  */
      curbrk = (*__morecore) (0);

      if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
	{
	  /* The descriptor table may sit just below the returnable
	     space; in that case relocate it first so the top of the
	     heap becomes one big free run.  */
	  size_t info_block = BLOCK (_heapinfo);
	  size_t info_blocks = _heapinfo[info_block].busy.info.size;
	  size_t prev_block = _heapinfo[block].free.prev;
	  size_t prev_blocks = _heapinfo[prev_block].free.size;
	  size_t next_block = _heapinfo[block].free.next;
	  size_t next_blocks = _heapinfo[next_block].free.size;

	  if (
	      /* Freed region is at the top, table right below it, and
		 a free region right below the table.  */
	      (block + blocks == _heaplimit &&
	       info_block + info_blocks == block &&
	       prev_block != 0 && prev_block + prev_blocks == info_block &&
	       blocks + prev_blocks >= lesscore_threshold) ||
	      /* Freed region right below the table, table at (or free
		 run reaching) the top.  */
	      (block + blocks == info_block &&
	       ((info_block + info_blocks == _heaplimit &&
		 blocks >= lesscore_threshold) ||
		(info_block + info_blocks == next_block &&
		 next_block + next_blocks == _heaplimit &&
		 blocks + next_blocks >= lesscore_threshold)))
	      )
	    {
	      malloc_info *newinfo;
	      size_t oldlimit = _heaplimit;

	      /* Free the old table, with _heaplimit zeroed so this
		 recursive call cannot itself shrink the heap.  */
	      _heaplimit = 0;
	      _free_internal_nolock (_heapinfo);
	      _heaplimit = oldlimit;

	      /* Restart the free-list search from the bottom so the
		 reallocation below lands low in the heap.  */
	      _heapindex = 0;

	      /* Allocate fresh space lower down and move the table.  */
	      newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
	      PROTECT_MALLOC_STATE (0);
	      memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
	      _heapinfo = newinfo;

	      /* The top free region to consider returning is now the
		 last one in the ring.  */
	      block = _heapinfo[0].free.prev;
	      blocks = _heapinfo[block].free.size;
	    }

	  /* Give the top free region back to the system if large
	     enough, keeping lesscore_threshold blocks in reserve.  */
	  if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
	    {
	      register size_t bytes = blocks * BLOCKSIZE;
	      _heaplimit -= blocks;
	      (*__morecore) (-bytes);
	      _heapinfo[_heapinfo[block].free.prev].free.next
		= _heapinfo[block].free.next;
	      _heapinfo[_heapinfo[block].free.next].free.prev
		= _heapinfo[block].free.prev;
	      block = _heapinfo[block].free.prev;
	      --_chunks_free;
	      _bytes_free -= bytes;
	    }
	}

      /* Resume future searches near the freed region.  */
      _heapindex = block;
      break;

    default:
      /* Fragment of size 1 << TYPE.  */
      --_chunks_used;
      _bytes_used -= 1 << type;
      ++_chunks_free;
      _bytes_free += 1 << type;

      /* Address of this block's first free fragment.  */
      prev = (struct list *) ((char *) ADDRESS (block) +
			      (_heapinfo[block].busy.info.frag.first << type));

      if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
	{
	  /* Every fragment is now free: remove them all from the
	     fragment list and free the whole block.  */
	  next = prev;
	  for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
	    next = next->next;
	  prev->prev->next = next;
	  if (next != NULL)
	    next->prev = prev->prev;
	  _heapinfo[block].busy.type = 0;
	  _heapinfo[block].busy.info.size = 1;

	  /* Keep statistics consistent across the recursive free.  */
	  ++_chunks_used;
	  _bytes_used += BLOCKSIZE;
	  _chunks_free -= BLOCKSIZE >> type;
	  _bytes_free -= BLOCKSIZE;

#if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
	  _free_internal_nolock (ADDRESS (block));
#else
	  free (ADDRESS (block));
#endif
	}
      else if (_heapinfo[block].busy.info.frag.nfree != 0)
	{
	  /* Other fragments of this block are already free: splice
	     this one in after the block's first free fragment.  */
	  next = ptr;
	  next->next = prev->next;
	  next->prev = prev;
	  prev->next = next;
	  if (next->next != NULL)
	    next->next->prev = next;
	  ++_heapinfo[block].busy.info.frag.nfree;
	}
      else
	{
	  /* First free fragment of this block: put it at the head of
	     the fragment list for this size.  */
	  prev = ptr;
	  _heapinfo[block].busy.info.frag.nfree = 1;
	  _heapinfo[block].busy.info.frag.first =
	    (uintptr_t) ptr % BLOCKSIZE >> type;
	  prev->next = _fraghead[type].next;
	  prev->prev = &_fraghead[type];
	  prev->prev->next = prev;
	  if (prev->next != NULL)
	    prev->next->prev = prev;
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
}
1251
1252
1253
/* Locked wrapper around the core deallocator.  */
void
_free_internal (void *ptr)
{
  LOCK ();
  _free_internal_nolock (ptr);
  UNLOCK ();
}
1261
1262
1263
1264 void
1265 free (void *ptr)
1266 {
1267 void (*hook) (void *) = __free_hook;
1268
1269 if (hook != NULL)
1270 (*hook) (ptr);
1271 else
1272 _free_internal (ptr);
1273 }
1274
1275
/* Define cfree as another name for free: as a weak alias where the
   toolchain supports it, otherwise as a real forwarding function.  */
#ifdef weak_alias
weak_alias (free, cfree)
#else
void
cfree (void *ptr)
{
  free (ptr);
}
#endif
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305 #ifndef min
1306 #define min(a, b) ((a) < (b) ? (a) : (b))
1307 #endif
1308
1309
1310 void *(*__realloc_hook) (void *ptr, size_t size);
1311
1312
1313
1314
1315
1316
1317
/* Core reallocator.  Caller must hold the malloc lock.  Resize the
   region at PTR to SIZE bytes, moving it if necessary.  On failure the
   original region is preserved (with some effort to re-cover it).  */
void *
_realloc_internal_nolock (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, blocks, oldlimit;

  if (size == 0)
    {
      /* realloc (p, 0) frees P and returns a fresh minimal chunk.  */
      _free_internal_nolock (ptr);
      return _malloc_internal_nolock (0);
    }
  else if (ptr == NULL)
    return _malloc_internal_nolock (size);

  block = BLOCK (ptr);

  PROTECT_MALLOC_STATE (0);

  type = _heapinfo[block].busy.type;
  switch (type)
    {
    case 0:
      /* PTR is a whole-block region.  If the new size fits a fragment,
	 try shrinking into one.  */
      if (size <= BLOCKSIZE / 2)
	{
	  result = _malloc_internal_nolock (size);
	  if (result != NULL)
	    {
	      memcpy (result, ptr, size);
	      _free_internal_nolock (ptr);
	      goto out;
	    }
	}

      /* The new size is a multi-block request.  */
      blocks = BLOCKIFY (size);
      if (blocks < _heapinfo[block].busy.info.size)
	{
	  /* Shrinking: split the region and free the tail in place.  */
	  _heapinfo[block + blocks].busy.type = 0;
	  _heapinfo[block + blocks].busy.info.size
	    = _heapinfo[block].busy.info.size - blocks;
	  _heapinfo[block].busy.info.size = blocks;

	  /* The tail is a chunk of its own now; the free below will
	     decrement _chunks_used, so pre-increment to compensate.  */
	  ++_chunks_used;
	  _free_internal_nolock (ADDRESS (block + blocks));
	  result = ptr;
	}
      else if (blocks == _heapinfo[block].busy.info.size)
	/* Same size: nothing to do.  */
	result = ptr;
      else
	{
	  /* Growing: free the region first (so an adjacent free block
	     can be reused), then allocate the larger size.  Prevent
	     the free from returning core to the system meanwhile.  */
	  blocks = _heapinfo[block].busy.info.size;

	  oldlimit = _heaplimit;
	  _heaplimit = 0;
	  _free_internal_nolock (ptr);
	  result = _malloc_internal_nolock (size);
	  PROTECT_MALLOC_STATE (0);
	  if (_heaplimit == 0)
	    _heaplimit = oldlimit;
	  if (result == NULL)
	    {
	      /* Allocation failed; the old data must stay intact.
		 Re-allocate the region we freed so nothing else can
		 claim it, avoiding clobbering the caller's bytes.  */
	      if (_heapindex == block)
		(void) _malloc_internal_nolock (blocks * BLOCKSIZE);
	      else
		{
		  void *previous
		    = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
		  (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
		  _free_internal_nolock (previous);
		}
	      goto out;
	    }
	  if (ptr != result)
	    memmove (result, ptr, blocks * BLOCKSIZE);
	}
      break;

    default:
      /* PTR is a fragment of size 1 << TYPE.  */
      if (size > (size_t) (1 << (type - 1)) &&
	  size <= (size_t) (1 << type))
	/* The new size still rounds to the same fragment class.  */
	result = ptr;
      else
	{
	  /* Different fragment class: allocate, copy, free.  */
	  result = _malloc_internal_nolock (size);
	  if (result == NULL)
	    goto out;
	  memcpy (result, ptr, min (size, (size_t) 1 << type));
	  _free_internal_nolock (ptr);
	}
      break;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}
1433
/* Locked wrapper around the core reallocator.  */
void *
_realloc_internal (void *ptr, size_t size)
{
  void *value;

  LOCK ();
  value = _realloc_internal_nolock (ptr, size);
  UNLOCK ();

  return value;
}
1445
1446 void *
1447 realloc (void *ptr, size_t size)
1448 {
1449 void *(*hook) (void *, size_t);
1450
1451 if (!__malloc_initialized && !__malloc_initialize ())
1452 return NULL;
1453
1454 hook = __realloc_hook;
1455 return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
1456 }
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
/* Allocate NMEMB elements of SIZE bytes each, all zeroed.  Fails with
   ENOMEM when the total byte count overflows size_t.  */
void *
calloc (size_t nmemb, size_t size)
{
  size_t total;
  void *p;

  /* The product wraps on overflow; dividing back detects it.  */
  total = nmemb * size;
  if (size != 0 && total / size != nmemb)
    {
      errno = ENOMEM;
      return NULL;
    }

  p = malloc (total);
  if (p == NULL)
    return p;
  return memset (p, 0, total);
}
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
1513 #define __sbrk sbrk
1514 #else
1515
1516
1517
1518 extern void *__sbrk (ptrdiff_t increment);
1519 #endif
1520
1521
1522
1523
/* Default __morecore: move the break by INCREMENT bytes via sbrk and
   return the old break, or NULL on failure.  On Cygwin before dumping,
   core comes from the static bss_sbrk buffer instead.  */
void *
__default_morecore (ptrdiff_t increment)
{
  void *result;
#if defined (CYGWIN)
  if (!DUMPED)
    {
      return bss_sbrk (increment);
    }
#endif
  result = (void *) __sbrk (increment);
  /* sbrk signals failure with (void *) -1; normalize to NULL.  */
  if (result == (void *) -1)
    return NULL;
  return result;
}
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554 void *(*__memalign_hook) (size_t size, size_t alignment);
1555
/* Allocate SIZE bytes aligned to ALIGNMENT.  Over-allocates, then
   records the aligned alias in _aligned_blocks so free () can map it
   back to the pointer malloc actually returned.  */
void *
aligned_alloc (size_t alignment, size_t size)
{
  void *result;
  size_t adj, lastadj;
  void *(*hook) (size_t, size_t) = __memalign_hook;

  if (hook)
    return (*hook) (alignment, size);

  /* size + alignment - 1 must not wrap around.  */
  if (- size < alignment)
    {
      errno = ENOMEM;
      return NULL;
    }
  result = malloc (size + alignment - 1);
  if (result == NULL)
    return NULL;

  /* ADJ = bytes of padding needed to reach the next ALIGNMENT
     boundary (0 if already aligned).  */
  adj = alignment - (uintptr_t) result % alignment;
  if (adj == alignment)
    adj = 0;

  if (adj != alignment - 1)
    {
      do
	{
	  /* Reallocate with only as much slack as this block actually
	     needs.  */
	  free (result);
	  result = malloc (size + adj);
	  if (result == NULL)
	    return NULL;

	  lastadj = adj;
	  adj = alignment - (uintptr_t) result % alignment;
	  if (adj == alignment)
	    adj = 0;
	  /* If the new block is even less aligned than the previous
	     one, it is too short after correction — loop and try a
	     slightly larger request.  */
	} while (adj > lastadj);
    }

  if (adj != 0)
    {
      /* Record the (aligned, exact) pair so _free_internal_nolock can
	 translate the aligned alias back.  Reuse an empty slot when
	 possible.  */
      struct alignlist *l;
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == NULL)
	  /* This slot was freed earlier and can be reused.  */
	  break;
      if (l == NULL)
	{
	  l = malloc (sizeof *l);
	  if (l != NULL)
	    {
	      l->next = _aligned_blocks;
	      _aligned_blocks = l;
	    }
	}
      if (l != NULL)
	{
	  l->exact = result;
	  result = l->aligned = (char *) result + adj;
	}
      UNLOCK_ALIGNED_BLOCKS ();
      if (l == NULL)
	{
	  /* Could not record the alias; free () would misbehave on the
	     aligned pointer, so fail the allocation.  */
	  free (result);
	  result = NULL;
	}
    }

  return result;
}
1642
1643
1644
1645
/* Obsolete alias for aligned_alloc.  */
void *
memalign (size_t alignment, size_t size)
{
  return aligned_alloc (alignment, size);
}
1651
1652
1653
#ifndef HYBRID_MALLOC
/* POSIX-style aligned allocation.  Stores the block in *MEMPTR and
   returns 0, or returns EINVAL/ENOMEM without touching *MEMPTR.  */
int
posix_memalign (void **memptr, size_t alignment, size_t size)
{
  void *block;
  int bad_alignment;

  /* ALIGNMENT must be a nonzero power of two that is also a multiple
     of sizeof (void *), per POSIX.  */
  bad_alignment = (alignment == 0
		   || (alignment & (alignment - 1)) != 0
		   || alignment % sizeof (void *) != 0);
  if (bad_alignment)
    return EINVAL;

  block = aligned_alloc (alignment, size);
  if (block == NULL)
    return ENOMEM;

  *memptr = block;
  return 0;
}
#endif
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695 extern void *valloc (size_t);
1696
1697 #if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1698 # include "getpagesize.h"
1699 #elif !defined getpagesize
1700 extern int getpagesize (void);
1701 #endif
1702
/* Cached system page size; filled in lazily on first use.  */
static size_t pagesize;

/* Allocate SIZE bytes aligned to the system page size.  */
void *
valloc (size_t size)
{
  if (!pagesize)
    pagesize = getpagesize ();

  return aligned_alloc (pagesize, size);
}
1713
1714 #ifdef HYBRID_MALLOC
1715 #undef malloc
1716 #undef realloc
1717 #undef calloc
1718 #undef aligned_alloc
1719 #undef free
1720
1721
1722 extern void *malloc (size_t size);
1723 extern void *realloc (void *ptr, size_t size);
1724 extern void *calloc (size_t nmemb, size_t size);
1725 extern void free (void *ptr);
1726 #ifdef HAVE_ALIGNED_ALLOC
1727 extern void *aligned_alloc (size_t alignment, size_t size);
1728 #elif defined HAVE_POSIX_MEMALIGN
1729 extern int posix_memalign (void **memptr, size_t alignment, size_t size);
1730 #endif
1731
1732
1733
1734
1735 void *
1736 hybrid_malloc (size_t size)
1737 {
1738 if (DUMPED)
1739 return malloc (size);
1740 return gmalloc (size);
1741 }
1742
1743 void *
1744 hybrid_calloc (size_t nmemb, size_t size)
1745 {
1746 if (DUMPED)
1747 return calloc (nmemb, size);
1748 return gcalloc (nmemb, size);
1749 }
1750
1751 void
1752 hybrid_free (void *ptr)
1753 {
1754 if (!DUMPED)
1755 gfree (ptr);
1756 else if (!ALLOCATED_BEFORE_DUMPING (ptr))
1757 free (ptr);
1758
1759
1760 return;
1761 }
1762
1763 #if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
1764 void *
1765 hybrid_aligned_alloc (size_t alignment, size_t size)
1766 {
1767 if (!DUMPED)
1768 return galigned_alloc (alignment, size);
1769
1770 #ifdef HAVE_ALIGNED_ALLOC
1771 return aligned_alloc (alignment, size);
1772 #else
1773 void *p;
1774 return posix_memalign (&p, alignment, size) == 0 ? p : 0;
1775 #endif
1776 }
1777 #endif
1778
/* Reallocate PTR to SIZE bytes.  Before dumping this is grealloc;
   after dumping, blocks in the system heap go to the system realloc.
   Blocks allocated from the static heap before dumping cannot be
   resized or freed there, so their contents are copied into a fresh
   system-malloc'd block (the old block is intentionally leaked).  */
void *
hybrid_realloc (void *ptr, size_t size)
{
  void *result;
  int type;
  size_t block, oldsize;

  if (!DUMPED)
    return grealloc (ptr, size);
  if (!ALLOCATED_BEFORE_DUMPING (ptr))
    return realloc (ptr, size);

  /* The dumped emacs is trying to realloc storage allocated before
     dumping via gmalloc.  Degenerate cases behave like malloc.  */
  if (size == 0 || ptr == NULL)
    return malloc (size);
  /* Recover the block's recorded size from gmalloc's _heapinfo table:
     type 0 means a multi-block allocation whose size is counted in
     BLOCKSIZE units; otherwise TYPE is the log2 of a fragment size.  */
  block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  type = _heapinfo[block].busy.type;
  oldsize =
    type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
    : (size_t) 1 << type;
  /* Copy min (oldsize, size) bytes into a new system block.  */
  result = malloc (size);
  if (result)
    return memcpy (result, ptr, min (oldsize, size));
  return result;
}
1805
1806 #ifdef HYBRID_GET_CURRENT_DIR_NAME
1807
1808 char *gget_current_dir_name (void);
1809
1810 char *
1811 hybrid_get_current_dir_name (void)
1812 {
1813 if (DUMPED)
1814 return get_current_dir_name ();
1815 return gget_current_dir_name ();
1816 }
1817 #endif
1818
1819 #endif
1820
1821 #ifdef GC_MCHECK
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843 #include <stdio.h>
1844
1845
1846 static void (*old_free_hook) (void *ptr);
1847 static void *(*old_malloc_hook) (size_t size);
1848 static void *(*old_realloc_hook) (void *ptr, size_t size);
1849
1850
1851 static void (*abortfunc) (enum mcheck_status);
1852
1853
1854 #define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
1855 #define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
1856 #define MAGICBYTE ((char) 0xd7)
1857 #define MALLOCFLOOD ((char) 0x93)
1858 #define FREEFLOOD ((char) 0x95)
1859
/* Bookkeeping header stored immediately before each user block while
   mcheck is active; a single guard byte (MAGICBYTE) follows the user
   data.  */
struct hdr
  {
    size_t size;	/* Exact size requested by user.  */
    size_t magic;	/* MAGICWORD while live, MAGICFREE once freed.  */
  };
1865
1866 static enum mcheck_status
1867 checkhdr (const struct hdr *hdr)
1868 {
1869 enum mcheck_status status;
1870 switch (hdr->magic)
1871 {
1872 default:
1873 status = MCHECK_HEAD;
1874 break;
1875 case MAGICFREE:
1876 status = MCHECK_FREE;
1877 break;
1878 case MAGICWORD:
1879 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1880 status = MCHECK_TAIL;
1881 else
1882 status = MCHECK_OK;
1883 break;
1884 }
1885 if (status != MCHECK_OK)
1886 (*abortfunc) (status);
1887 return status;
1888 }
1889
/* Hooked variant of free: map an aligned pointer back to its exact
   allocation, verify the bookkeeping header, flood the dead payload
   with FREEFLOOD, then free the real header block.  */
static void
freehook (void *ptr)
{
  struct hdr *hdr;

  if (ptr)
    {
      struct alignlist *l;

      /* If the block was allocated by aligned_alloc, its real pointer
	 to free is recorded in _aligned_blocks; find that.  */
      PROTECT_MALLOC_STATE (0);
      LOCK_ALIGNED_BLOCKS ();
      for (l = _aligned_blocks; l != NULL; l = l->next)
	if (l->aligned == ptr)
	  {
	    l->aligned = NULL;	/* Mark the slot in the list as free.  */
	    ptr = l->exact;
	    break;
	  }
      UNLOCK_ALIGNED_BLOCKS ();
      PROTECT_MALLOC_STATE (1);

      hdr = ((struct hdr *) ptr) - 1;
      checkhdr (hdr);
      hdr->magic = MAGICFREE;	/* Make a later double free detectable.  */
      memset (ptr, FREEFLOOD, hdr->size);
    }
  else
    hdr = NULL;

  /* Temporarily restore the original hook so the nested free call is
     not intercepted by this function again.  */
  __free_hook = old_free_hook;
  free (hdr);
  __free_hook = freehook;
}
1925
/* Hooked variant of malloc: allocate room for the bookkeeping header
   plus one trailing guard byte, initialize both, and flood the user
   payload with MALLOCFLOOD so use of uninitialized memory shows up.  */
static void *
mallochook (size_t size)
{
  struct hdr *hdr;

  /* Unhook while calling the real malloc to avoid recursing into this
     function.  */
  __malloc_hook = old_malloc_hook;
  hdr = malloc (sizeof *hdr + size + 1);
  __malloc_hook = mallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;	/* Trailing overrun guard.  */
  return memset (hdr + 1, MALLOCFLOOD, size);
}
1942
/* Hooked variant of realloc: verify the old header, flood whichever
   region dies or is born by the resize, and re-establish the header,
   guard byte, and magic on the resulting block.  */
static void *
reallochook (void *ptr, size_t size)
{
  struct hdr *hdr = NULL;
  size_t osize = 0;

  if (ptr)
    {
      hdr = ((struct hdr *) ptr) - 1;
      osize = hdr->size;

      checkhdr (hdr);
      if (size < osize)
	/* Flood the tail that is about to be cut off before realloc
	   can recycle it.  */
	memset ((char *) ptr + size, FREEFLOOD, osize - size);
    }

  /* Unhook all three hooks: the real realloc may itself call malloc
     or free internally.  */
  __free_hook = old_free_hook;
  __malloc_hook = old_malloc_hook;
  __realloc_hook = old_realloc_hook;
  hdr = realloc (hdr, sizeof *hdr + size + 1);
  __free_hook = freehook;
  __malloc_hook = mallochook;
  __realloc_hook = reallochook;
  if (hdr == NULL)
    return NULL;

  hdr->size = size;
  hdr->magic = MAGICWORD;
  ((char *) &hdr[1])[size] = MAGICBYTE;	/* Fresh trailing guard.  */
  if (size > osize)
    /* Newly grown bytes get the "uninitialized" flood pattern.  */
    memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  return hdr + 1;
}
1976
1977 static void
1978 mabort (enum mcheck_status status)
1979 {
1980 const char *msg;
1981 switch (status)
1982 {
1983 case MCHECK_OK:
1984 msg = "memory is consistent, library is buggy";
1985 break;
1986 case MCHECK_HEAD:
1987 msg = "memory clobbered before allocated block";
1988 break;
1989 case MCHECK_TAIL:
1990 msg = "memory clobbered past end of allocated block";
1991 break;
1992 case MCHECK_FREE:
1993 msg = "block freed twice";
1994 break;
1995 default:
1996 msg = "bogus mcheck_status, library is buggy";
1997 break;
1998 }
1999 #ifdef __GNU_LIBRARY__
2000 __libc_fatal (msg);
2001 #else
2002 fprintf (stderr, "mcheck: %s\n", msg);
2003 fflush (stderr);
2004 # ifdef emacs
2005 emacs_abort ();
2006 # else
2007 abort ();
2008 # endif
2009 #endif
2010 }
2011
2012 static int mcheck_used = 0;
2013
2014 int
2015 mcheck (void (*func) (enum mcheck_status))
2016 {
2017 abortfunc = (func != NULL) ? func : &mabort;
2018
2019
2020 if (!__malloc_initialized && !mcheck_used)
2021 {
2022 old_free_hook = __free_hook;
2023 __free_hook = freehook;
2024 old_malloc_hook = __malloc_hook;
2025 __malloc_hook = mallochook;
2026 old_realloc_hook = __realloc_hook;
2027 __realloc_hook = reallochook;
2028 mcheck_used = 1;
2029 }
2030
2031 return mcheck_used ? 0 : -1;
2032 }
2033
2034 enum mcheck_status
2035 mprobe (void *ptr)
2036 {
2037 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
2038 }
2039
2040 #endif