This source file includes following definitions.
- protect_malloc_state
- align
- get_contiguous_space
- register_heapinfo
- malloc_atfork_handler_prepare
- malloc_atfork_handler_parent
- malloc_atfork_handler_child
- malloc_enable_thread
- malloc_initialize_1
- __malloc_initialize
- morecore_nolock
- _malloc_internal_nolock
- _malloc_internal
- malloc
- _malloc
- _free
- _realloc
- _free_internal_nolock
- _free_internal
- free
- weak_alias
- _realloc_internal_nolock
- _realloc_internal
- realloc
- calloc
- gdefault_morecore
- aligned_alloc
- memalign
- posix_memalign
- valloc
- allocated_via_gmalloc
- hybrid_malloc
- hybrid_calloc
- hybrid_free_1
- hybrid_free
- hybrid_aligned_alloc
- hybrid_realloc
- malloc
- calloc
- free
- aligned_alloc
- realloc
- checkhdr
- freehook
- mallochook
- reallochook
- mabort
- mcheck
- mprobe
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 #include <config.h>
23
24 #if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
25 #define USE_PTHREAD
26 #endif
27
28 #include <stddef.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <limits.h>
32 #include <stdint.h>
33 #include <unistd.h>
34
35 #ifdef USE_PTHREAD
36 #include <pthread.h>
37 #endif
38
39 #include "lisp.h"
40
41 #ifdef HAVE_MALLOC_H
42 # if GNUC_PREREQ (4, 2, 0)
43 # pragma GCC diagnostic ignored "-Wdeprecated-declarations"
44 # endif
45 # include <malloc.h>
46 #endif
47 #ifndef __MALLOC_HOOK_VOLATILE
48 # define __MALLOC_HOOK_VOLATILE volatile
49 #endif
50 #ifndef HAVE_MALLOC_H
51 extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
52 extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
53 extern void *(*__morecore) (ptrdiff_t);
54 #endif
55
56
57
58
59
60
61
62
63 #undef malloc
64 #undef realloc
65 #undef calloc
66 #undef aligned_alloc
67 #undef free
68 #define malloc gmalloc
69 #define realloc grealloc
70 #define calloc gcalloc
71 #define aligned_alloc galigned_alloc
72 #define free gfree
73 #define malloc_info gmalloc_info
74
75 #ifdef HYBRID_MALLOC
76 # include "sheap.h"
77 #endif
78
79 #ifdef __cplusplus
80 extern "C"
81 {
82 #endif
83
84 #ifdef HYBRID_MALLOC
85 #define extern static
86 #endif
87
88
89 extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
90
91
92 extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
93
94 extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
95
96 extern void free (void *ptr);
97
98
99 extern void *aligned_alloc (size_t, size_t);
100 #ifdef MSDOS
101 extern void *memalign (size_t, size_t);
102 extern int posix_memalign (void **, size_t, size_t);
103 #endif
104
105
106
107
108
109
110 #define BLOCKLOG (INT_WIDTH > 16 ? 12 : 9)
111 #define BLOCKSIZE (1 << BLOCKLOG)
112 #define BLOCKIFY(SIZE) (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
113
114
115
116 #define HEAP (INT_WIDTH > 16 ? 4194304 : 65536)
117
118
119
120 #define FINAL_FREE_BLOCKS 8
121
122
123 typedef union
124 {
125
126 struct
127 {
128
129
130
131
132
133
134
135 int type;
136 union
137 {
138 struct
139 {
140 size_t nfree;
141 size_t first;
142 } frag;
143
144
145 ptrdiff_t size;
146 } info;
147 } busy;
148
149
150 struct
151 {
152 size_t size;
153 size_t next;
154 size_t prev;
155 } free;
156 } malloc_info;
157
158
159 extern char *_heapbase;
160
161
162 extern malloc_info *_heapinfo;
163
164
165 #define BLOCK(A) ((size_t) ((char *) (A) - _heapbase) / BLOCKSIZE + 1)
166 #define ADDRESS(B) ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
167
168
169 extern size_t _heapindex;
170
171
172 extern size_t _heaplimit;
173
174
175 struct list
176 {
177 struct list *next;
178 struct list *prev;
179 };
180
181
182 static struct list _fraghead[BLOCKLOG];
183
184
185 struct alignlist
186 {
187 struct alignlist *next;
188 void *aligned;
189 void *exact;
190 };
191 extern struct alignlist *_aligned_blocks;
192
193
194 extern size_t _chunks_used;
195 extern size_t _bytes_used;
196 extern size_t _chunks_free;
197 extern size_t _bytes_free;
198
199
200
201
202 extern void *_malloc_internal (size_t);
203 extern void *_realloc_internal (void *, size_t);
204 extern void _free_internal (void *);
205 extern void *_malloc_internal_nolock (size_t);
206 extern void *_realloc_internal_nolock (void *, size_t);
207 extern void _free_internal_nolock (void *);
208
209 #ifdef USE_PTHREAD
210 extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
211 extern int _malloc_thread_enabled_p;
212 #define LOCK() \
213 do { \
214 if (_malloc_thread_enabled_p) \
215 pthread_mutex_lock (&_malloc_mutex); \
216 } while (0)
217 #define UNLOCK() \
218 do { \
219 if (_malloc_thread_enabled_p) \
220 pthread_mutex_unlock (&_malloc_mutex); \
221 } while (0)
222 #define LOCK_ALIGNED_BLOCKS() \
223 do { \
224 if (_malloc_thread_enabled_p) \
225 pthread_mutex_lock (&_aligned_blocks_mutex); \
226 } while (0)
227 #define UNLOCK_ALIGNED_BLOCKS() \
228 do { \
229 if (_malloc_thread_enabled_p) \
230 pthread_mutex_unlock (&_aligned_blocks_mutex); \
231 } while (0)
232 #else
233 #define LOCK()
234 #define UNLOCK()
235 #define LOCK_ALIGNED_BLOCKS()
236 #define UNLOCK_ALIGNED_BLOCKS()
237 #endif
238
239
240 extern int __malloc_initialized;
241
242 extern int __malloc_initialize (void);
243
244 #ifdef GC_MCHECK
245
246
247
248 enum mcheck_status
249 {
250 MCHECK_DISABLED = -1,
251 MCHECK_OK,
252 MCHECK_FREE,
253 MCHECK_HEAD,
254 MCHECK_TAIL
255 };
256
257
258
259
260
261 extern int mcheck (void (*abortfunc) (enum mcheck_status));
262
263
264
265
266 extern enum mcheck_status mprobe (void *ptr);
267
268
269 extern void mtrace (void);
270 extern void muntrace (void);
271
272
273 struct mstats
274 {
275 size_t bytes_total;
276 size_t chunks_used;
277 size_t bytes_used;
278 size_t chunks_free;
279 size_t bytes_free;
280 };
281
282
283 extern struct mstats mstats (void);
284
285 #endif
286
287 #undef extern
288
289 #ifdef __cplusplus
290 }
291 #endif
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313 #include <errno.h>
314
315
316 static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);
317
318
319
320
321 void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
322 void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
323 void *(*__morecore) (ptrdiff_t);
324
325 #ifndef HYBRID_MALLOC
326
327
328 char *_heapbase;
329
330
331 malloc_info *_heapinfo;
332
333
334 size_t _heapindex;
335
336
337 size_t _heaplimit;
338
339
340 size_t _chunks_used;
341 size_t _bytes_used;
342 size_t _chunks_free;
343 size_t _bytes_free;
344
345
346 int __malloc_initialized;
347
348 #endif
349
350
351
352 #if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
353 static
354 #endif
355 size_t __malloc_extra_blocks;
356
357
358 static size_t heapsize;
359
360 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
361
362
363
364
365
366
367
368
369
370
371 #include <sys/types.h>
372 #include <sys/mman.h>
373
374 static int state_protected_p;
375 static size_t last_state_size;
376 static malloc_info *last_heapinfo;
377
378 void
379 protect_malloc_state (int protect_p)
380 {
381
382
383 if (_heapinfo != last_heapinfo
384 && last_heapinfo
385 && state_protected_p)
386 mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);
387
388 last_state_size = _heaplimit * sizeof *_heapinfo;
389 last_heapinfo = _heapinfo;
390
391 if (protect_p != state_protected_p)
392 {
393 state_protected_p = protect_p;
394 if (mprotect (_heapinfo, last_state_size,
395 protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
396 abort ();
397 }
398 }
399
400 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)
401
402 #else
403 #define PROTECT_MALLOC_STATE(PROT)
404 #endif
405
406
407
408 static void *
409 align (size_t size)
410 {
411 void *result;
412 ptrdiff_t adj;
413
414
415
416
417
418 if (PTRDIFF_MAX < size)
419 result = 0;
420 else
421 result = (*__morecore) (size);
422 adj = (uintptr_t) result % BLOCKSIZE;
423 if (adj != 0)
424 {
425 adj = BLOCKSIZE - adj;
426 (*__morecore) (adj);
427 result = (char *) result + adj;
428 }
429
430 if (__after_morecore_hook)
431 (*__after_morecore_hook) ();
432
433 return result;
434 }
435
436
437
438
439 static void *
440 get_contiguous_space (ptrdiff_t size, void *position)
441 {
442 void *before;
443 void *after;
444
445 before = (*__morecore) (0);
446
447
448 if (before != position)
449 return 0;
450
451
452 after = (*__morecore) (size);
453 if (!after)
454 return 0;
455
456
457 if (after != position)
458 {
459 (*__morecore) (- size);
460 return 0;
461 }
462
463 return after;
464 }
465
466
467
468
469
470 static void
471 register_heapinfo (void)
472 {
473 size_t block, blocks;
474
475 block = BLOCK (_heapinfo);
476 blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
477
478
479 _bytes_used += blocks * BLOCKSIZE;
480 ++_chunks_used;
481
482
483 _heapinfo[block].busy.type = -1;
484 _heapinfo[block].busy.info.size = blocks;
485 }
486
487 #ifdef USE_PTHREAD
488 pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
489 pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
490 int _malloc_thread_enabled_p;
491
492 static void
493 malloc_atfork_handler_prepare (void)
494 {
495 LOCK ();
496 LOCK_ALIGNED_BLOCKS ();
497 }
498
499 static void
500 malloc_atfork_handler_parent (void)
501 {
502 UNLOCK_ALIGNED_BLOCKS ();
503 UNLOCK ();
504 }
505
506 static void
507 malloc_atfork_handler_child (void)
508 {
509 UNLOCK_ALIGNED_BLOCKS ();
510 UNLOCK ();
511 }
512
513
514 void
515 malloc_enable_thread (void)
516 {
517 if (_malloc_thread_enabled_p)
518 return;
519
520
521
522
523
524 pthread_mutex_init (&_malloc_mutex, NULL);
525 pthread_mutex_init (&_aligned_blocks_mutex, NULL);
526 pthread_atfork (malloc_atfork_handler_prepare,
527 malloc_atfork_handler_parent,
528 malloc_atfork_handler_child);
529 _malloc_thread_enabled_p = 1;
530 }
531 #endif
532
533 static void
534 malloc_initialize_1 (void)
535 {
536 #ifdef GC_MCHECK
537 mcheck (NULL);
538 #endif
539
540 if (__malloc_initialize_hook)
541 (*__malloc_initialize_hook) ();
542
543 heapsize = HEAP / BLOCKSIZE;
544 _heapinfo = align (heapsize * sizeof (malloc_info));
545 if (_heapinfo == NULL)
546 return;
547 memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
548 _heapinfo[0].free.size = 0;
549 _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
550 _heapindex = 0;
551 _heapbase = (char *) _heapinfo;
552 _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
553
554 register_heapinfo ();
555
556 __malloc_initialized = 1;
557 PROTECT_MALLOC_STATE (1);
558 return;
559 }
560
561
562
563
564 int
565 __malloc_initialize (void)
566 {
567 if (__malloc_initialized)
568 return 0;
569
570 malloc_initialize_1 ();
571
572 return __malloc_initialized;
573 }
574
575 static int morecore_recursing;
576
577
578
579 static void *
580 morecore_nolock (size_t size)
581 {
582 void *result;
583 malloc_info *newinfo, *oldinfo;
584 size_t newsize;
585
586 if (morecore_recursing)
587
588 return NULL;
589
590 result = align (size);
591 if (result == NULL)
592 return NULL;
593
594 PROTECT_MALLOC_STATE (0);
595
596
597 if (heapsize < BLOCK ((char *) result + size))
598 {
599
600
601
602
603 newsize = heapsize;
604 do
605 newsize *= 2;
606 while (newsize < BLOCK ((char *) result + size));
607
608
609
610
611
612
613 if (_heaplimit != 0)
614 {
615
616
617
618
619
620 int save = errno;
621 morecore_recursing = 1;
622 newinfo = _realloc_internal_nolock (_heapinfo,
623 newsize * sizeof (malloc_info));
624 morecore_recursing = 0;
625 if (newinfo == NULL)
626 errno = save;
627 else
628 {
629
630
631
632 memset (&newinfo[heapsize], 0,
633 (newsize - heapsize) * sizeof (malloc_info));
634 _heapinfo = newinfo;
635 heapsize = newsize;
636 goto got_heap;
637 }
638 }
639
640
641 while (1)
642 {
643 newinfo = align (newsize * sizeof (malloc_info));
644
645
646 if (newinfo == NULL)
647 {
648 (*__morecore) (-size);
649 return NULL;
650 }
651
652
653
654 if (BLOCK ((char *) newinfo + newsize * sizeof (malloc_info))
655 < newsize)
656 break;
657
658
659 (*__morecore) (- newsize * sizeof (malloc_info));
660 newsize *= 2;
661 }
662
663
664
665 memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
666 memset (&newinfo[heapsize], 0,
667 (newsize - heapsize) * sizeof (malloc_info));
668 oldinfo = _heapinfo;
669 _heapinfo = newinfo;
670 heapsize = newsize;
671
672 register_heapinfo ();
673
674
675
676 _heaplimit = 0;
677 _free_internal_nolock (oldinfo);
678 PROTECT_MALLOC_STATE (0);
679
680
681 _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
682 return result;
683 }
684
685 got_heap:
686 _heaplimit = BLOCK ((char *) result + size);
687 return result;
688 }
689
690
691 void *
692 _malloc_internal_nolock (size_t size)
693 {
694 void *result;
695 size_t block, blocks, lastblocks, start;
696 register size_t i;
697 struct list *next;
698
699
700
701
702
703
704
705
706 #if 0
707 if (size == 0)
708 return NULL;
709 #endif
710
711 PROTECT_MALLOC_STATE (0);
712
713 if (size < sizeof (struct list))
714 size = sizeof (struct list);
715
716
717 if (size <= BLOCKSIZE / 2)
718 {
719
720
721 register size_t log = 1;
722 --size;
723 while ((size /= 2) != 0)
724 ++log;
725
726
727
728 next = _fraghead[log].next;
729 if (next != NULL)
730 {
731
732
733
734 result = next;
735 next->prev->next = next->next;
736 if (next->next != NULL)
737 next->next->prev = next->prev;
738 block = BLOCK (result);
739 if (--_heapinfo[block].busy.info.frag.nfree != 0)
740 _heapinfo[block].busy.info.frag.first =
741 (uintptr_t) next->next % BLOCKSIZE >> log;
742
743
744 ++_chunks_used;
745 _bytes_used += 1 << log;
746 --_chunks_free;
747 _bytes_free -= 1 << log;
748 }
749 else
750 {
751
752
753 #ifdef GC_MALLOC_CHECK
754 result = _malloc_internal_nolock (BLOCKSIZE);
755 PROTECT_MALLOC_STATE (0);
756 #elif defined (USE_PTHREAD)
757 result = _malloc_internal_nolock (BLOCKSIZE);
758 #else
759 result = malloc (BLOCKSIZE);
760 #endif
761 if (result == NULL)
762 {
763 PROTECT_MALLOC_STATE (1);
764 goto out;
765 }
766
767
768 next = (struct list *) ((char *) result + (1 << log));
769 next->next = NULL;
770 next->prev = &_fraghead[log];
771 _fraghead[log].next = next;
772
773 for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
774 {
775 next = (struct list *) ((char *) result + (i << log));
776 next->next = _fraghead[log].next;
777 next->prev = &_fraghead[log];
778 next->prev->next = next;
779 next->next->prev = next;
780 }
781
782
783 block = BLOCK (result);
784 _heapinfo[block].busy.type = log;
785 _heapinfo[block].busy.info.frag.nfree = i - 1;
786 _heapinfo[block].busy.info.frag.first = i - 1;
787
788 _chunks_free += (BLOCKSIZE >> log) - 1;
789 _bytes_free += BLOCKSIZE - (1 << log);
790 _bytes_used -= BLOCKSIZE - (1 << log);
791 }
792 }
793 else
794 {
795
796
797
798
799 blocks = BLOCKIFY (size);
800 start = block = _heapindex;
801 while (_heapinfo[block].free.size < blocks)
802 {
803 block = _heapinfo[block].free.next;
804 if (block == start)
805 {
806
807 size_t wantblocks = blocks + __malloc_extra_blocks;
808 block = _heapinfo[0].free.prev;
809 lastblocks = _heapinfo[block].free.size;
810
811
812 if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
813
814
815 block + wantblocks <= heapsize &&
816 get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
817 ADDRESS (block + lastblocks)))
818 {
819
820
821
822 block = _heapinfo[0].free.prev;
823 _heapinfo[block].free.size += (wantblocks - lastblocks);
824 _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
825 _heaplimit += wantblocks - lastblocks;
826 continue;
827 }
828 result = morecore_nolock (wantblocks * BLOCKSIZE);
829 if (result == NULL)
830 goto out;
831 block = BLOCK (result);
832
833 _heapinfo[block].free.size = wantblocks;
834 _heapinfo[block].free.prev = _heapinfo[0].free.prev;
835 _heapinfo[block].free.next = 0;
836 _heapinfo[0].free.prev = block;
837 _heapinfo[_heapinfo[block].free.prev].free.next = block;
838 ++_chunks_free;
839
840 }
841 }
842
843
844
845 result = ADDRESS (block);
846 if (_heapinfo[block].free.size > blocks)
847 {
848
849
850 _heapinfo[block + blocks].free.size
851 = _heapinfo[block].free.size - blocks;
852 _heapinfo[block + blocks].free.next
853 = _heapinfo[block].free.next;
854 _heapinfo[block + blocks].free.prev
855 = _heapinfo[block].free.prev;
856 _heapinfo[_heapinfo[block].free.prev].free.next
857 = _heapinfo[_heapinfo[block].free.next].free.prev
858 = _heapindex = block + blocks;
859 }
860 else
861 {
862
863
864 _heapinfo[_heapinfo[block].free.next].free.prev
865 = _heapinfo[block].free.prev;
866 _heapinfo[_heapinfo[block].free.prev].free.next
867 = _heapindex = _heapinfo[block].free.next;
868 --_chunks_free;
869 }
870
871 _heapinfo[block].busy.type = -1;
872 _heapinfo[block].busy.info.size = blocks;
873 ++_chunks_used;
874 _bytes_used += blocks * BLOCKSIZE;
875 _bytes_free -= blocks * BLOCKSIZE;
876 }
877
878 PROTECT_MALLOC_STATE (1);
879 out:
880 return result;
881 }
882
883 void *
884 _malloc_internal (size_t size)
885 {
886 void *result;
887
888 LOCK ();
889 result = _malloc_internal_nolock (size);
890 UNLOCK ();
891
892 return result;
893 }
894
895 void *
896 malloc (size_t size)
897 {
898 void *(*hook) (size_t);
899
900 if (!__malloc_initialized && !__malloc_initialize ())
901 return NULL;
902
903
904
905
906
907
908
909
910
911 hook = gmalloc_hook;
912 return (hook ? hook : _malloc_internal) (size);
913 }
914
915 #if !(defined (_LIBC) || defined (HYBRID_MALLOC))
916
917
918
919
920 extern void *_malloc (size_t);
921 extern void _free (void *);
922 extern void *_realloc (void *, size_t);
923
924 void *
925 _malloc (size_t size)
926 {
927 return malloc (size);
928 }
929
930 void
931 _free (void *ptr)
932 {
933 free (ptr);
934 }
935
936 void *
937 _realloc (void *ptr, size_t size)
938 {
939 return realloc (ptr, size);
940 }
941
942 #endif
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964 static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);
965
966 #ifndef HYBRID_MALLOC
967
968
969 struct alignlist *_aligned_blocks = NULL;
970 #endif
971
972
973
974 void
975 _free_internal_nolock (void *ptr)
976 {
977 int type;
978 size_t block, blocks;
979 register size_t i;
980 struct list *prev, *next;
981 void *curbrk;
982 const size_t lesscore_threshold
983
984 = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
985
986 register struct alignlist *l;
987
988 if (ptr == NULL)
989 return;
990
991 PROTECT_MALLOC_STATE (0);
992
993 LOCK_ALIGNED_BLOCKS ();
994 for (l = _aligned_blocks; l != NULL; l = l->next)
995 if (l->aligned == ptr)
996 {
997 l->aligned = NULL;
998 ptr = l->exact;
999 break;
1000 }
1001 UNLOCK_ALIGNED_BLOCKS ();
1002
1003 block = BLOCK (ptr);
1004
1005 type = _heapinfo[block].busy.type;
1006 switch (type)
1007 {
1008 case -1:
1009
1010 --_chunks_used;
1011 _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
1012 _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
1013
1014
1015
1016
1017 i = _heapindex;
1018 if (i > block)
1019 while (i > block)
1020 i = _heapinfo[i].free.prev;
1021 else
1022 {
1023 do
1024 i = _heapinfo[i].free.next;
1025 while (i > 0 && i < block);
1026 i = _heapinfo[i].free.prev;
1027 }
1028
1029
1030 if (block == i + _heapinfo[i].free.size)
1031 {
1032
1033 _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
1034 block = i;
1035 }
1036 else
1037 {
1038
1039 _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
1040 _heapinfo[block].free.next = _heapinfo[i].free.next;
1041 _heapinfo[block].free.prev = i;
1042 _heapinfo[i].free.next = block;
1043 _heapinfo[_heapinfo[block].free.next].free.prev = block;
1044 ++_chunks_free;
1045 }
1046
1047
1048
1049
1050 if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
1051 {
1052 _heapinfo[block].free.size
1053 += _heapinfo[_heapinfo[block].free.next].free.size;
1054 _heapinfo[block].free.next
1055 = _heapinfo[_heapinfo[block].free.next].free.next;
1056 _heapinfo[_heapinfo[block].free.next].free.prev = block;
1057 --_chunks_free;
1058 }
1059
1060
1061 blocks = _heapinfo[block].free.size;
1062
1063
1064 curbrk = (*__morecore) (0);
1065
1066 if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
1067 {
1068
1069
1070
1071
1072 size_t info_block = BLOCK (_heapinfo);
1073 size_t info_blocks = _heapinfo[info_block].busy.info.size;
1074 size_t prev_block = _heapinfo[block].free.prev;
1075 size_t prev_blocks = _heapinfo[prev_block].free.size;
1076 size_t next_block = _heapinfo[block].free.next;
1077 size_t next_blocks = _heapinfo[next_block].free.size;
1078
1079 if (
1080
1081
1082
1083 (block + blocks == _heaplimit &&
1084 info_block + info_blocks == block &&
1085 prev_block != 0 && prev_block + prev_blocks == info_block &&
1086 blocks + prev_blocks >= lesscore_threshold) ||
1087
1088
1089
1090
1091 (block + blocks == info_block &&
1092 ((info_block + info_blocks == _heaplimit &&
1093 blocks >= lesscore_threshold) ||
1094 (info_block + info_blocks == next_block &&
1095 next_block + next_blocks == _heaplimit &&
1096 blocks + next_blocks >= lesscore_threshold)))
1097 )
1098 {
1099 malloc_info *newinfo;
1100 size_t oldlimit = _heaplimit;
1101
1102
1103
1104
1105
1106 _heaplimit = 0;
1107 _free_internal_nolock (_heapinfo);
1108 _heaplimit = oldlimit;
1109
1110
1111
1112 _heapindex = 0;
1113
1114
1115 newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
1116 PROTECT_MALLOC_STATE (0);
1117 memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
1118 _heapinfo = newinfo;
1119
1120
1121
1122
1123
1124 block = _heapinfo[0].free.prev;
1125 blocks = _heapinfo[block].free.size;
1126 }
1127
1128
1129 if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
1130 {
1131 register size_t bytes = blocks * BLOCKSIZE;
1132 _heaplimit -= blocks;
1133 (*__morecore) (-bytes);
1134 _heapinfo[_heapinfo[block].free.prev].free.next
1135 = _heapinfo[block].free.next;
1136 _heapinfo[_heapinfo[block].free.next].free.prev
1137 = _heapinfo[block].free.prev;
1138 block = _heapinfo[block].free.prev;
1139 --_chunks_free;
1140 _bytes_free -= bytes;
1141 }
1142 }
1143
1144
1145 _heapindex = block;
1146 break;
1147
1148 default:
1149
1150 --_chunks_used;
1151 _bytes_used -= 1 << type;
1152 ++_chunks_free;
1153 _bytes_free += 1 << type;
1154
1155
1156 prev = (struct list *) ((char *) ADDRESS (block) +
1157 (_heapinfo[block].busy.info.frag.first << type));
1158
1159 if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
1160 {
1161
1162
1163 next = prev;
1164 for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
1165 next = next->next;
1166 prev->prev->next = next;
1167 if (next != NULL)
1168 next->prev = prev->prev;
1169 _heapinfo[block].busy.type = -1;
1170 _heapinfo[block].busy.info.size = 1;
1171
1172
1173 ++_chunks_used;
1174 _bytes_used += BLOCKSIZE;
1175 _chunks_free -= BLOCKSIZE >> type;
1176 _bytes_free -= BLOCKSIZE;
1177
1178 #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
1179 _free_internal_nolock (ADDRESS (block));
1180 #else
1181 free (ADDRESS (block));
1182 #endif
1183 }
1184 else if (_heapinfo[block].busy.info.frag.nfree != 0)
1185 {
1186
1187
1188
1189 next = ptr;
1190 next->next = prev->next;
1191 next->prev = prev;
1192 prev->next = next;
1193 if (next->next != NULL)
1194 next->next->prev = next;
1195 ++_heapinfo[block].busy.info.frag.nfree;
1196 }
1197 else
1198 {
1199
1200
1201
1202 prev = ptr;
1203 _heapinfo[block].busy.info.frag.nfree = 1;
1204 _heapinfo[block].busy.info.frag.first =
1205 (uintptr_t) ptr % BLOCKSIZE >> type;
1206 prev->next = _fraghead[type].next;
1207 prev->prev = &_fraghead[type];
1208 prev->prev->next = prev;
1209 if (prev->next != NULL)
1210 prev->next->prev = prev;
1211 }
1212 break;
1213 }
1214
1215 PROTECT_MALLOC_STATE (1);
1216 }
1217
1218
1219
1220 void
1221 _free_internal (void *ptr)
1222 {
1223 LOCK ();
1224 _free_internal_nolock (ptr);
1225 UNLOCK ();
1226 }
1227
1228
1229
1230 void
1231 free (void *ptr)
1232 {
1233 void (*hook) (void *) = gfree_hook;
1234
1235 if (hook != NULL)
1236 (*hook) (ptr);
1237 else
1238 _free_internal (ptr);
1239 }
1240
1241 #ifndef HYBRID_MALLOC
1242
1243 #ifdef weak_alias
1244 weak_alias (free, cfree)
1245 #else
1246 void
1247 cfree (void *ptr)
1248 {
1249 free (ptr);
1250 }
1251 #endif
1252 #endif
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273 #ifndef min
1274 #define min(a, b) ((a) < (b) ? (a) : (b))
1275 #endif
1276
1277
1278 static void *(*grealloc_hook) (void *, size_t);
1279
1280
1281
1282
1283
1284
1285
1286 void *
1287 _realloc_internal_nolock (void *ptr, size_t size)
1288 {
1289 void *result;
1290 int type;
1291 size_t block, blocks, oldlimit;
1292
1293 if (size == 0)
1294 {
1295 _free_internal_nolock (ptr);
1296 return _malloc_internal_nolock (0);
1297 }
1298 else if (ptr == NULL)
1299 return _malloc_internal_nolock (size);
1300
1301 block = BLOCK (ptr);
1302
1303 PROTECT_MALLOC_STATE (0);
1304
1305 type = _heapinfo[block].busy.type;
1306 switch (type)
1307 {
1308 case -1:
1309
1310 if (size <= BLOCKSIZE / 2)
1311 {
1312 result = _malloc_internal_nolock (size);
1313 if (result != NULL)
1314 {
1315 memcpy (result, ptr, size);
1316 _free_internal_nolock (ptr);
1317 goto out;
1318 }
1319 }
1320
1321
1322
1323 blocks = BLOCKIFY (size);
1324 if (blocks < _heapinfo[block].busy.info.size)
1325 {
1326
1327
1328 _heapinfo[block + blocks].busy.type = -1;
1329 _heapinfo[block + blocks].busy.info.size
1330 = _heapinfo[block].busy.info.size - blocks;
1331 _heapinfo[block].busy.info.size = blocks;
1332
1333
1334
1335 ++_chunks_used;
1336 _free_internal_nolock (ADDRESS (block + blocks));
1337 result = ptr;
1338 }
1339 else if (blocks == _heapinfo[block].busy.info.size)
1340
1341 result = ptr;
1342 else
1343 {
1344
1345
1346
1347 blocks = _heapinfo[block].busy.info.size;
1348
1349 oldlimit = _heaplimit;
1350 _heaplimit = 0;
1351 _free_internal_nolock (ptr);
1352 result = _malloc_internal_nolock (size);
1353 PROTECT_MALLOC_STATE (0);
1354 if (_heaplimit == 0)
1355 _heaplimit = oldlimit;
1356 if (result == NULL)
1357 {
1358
1359
1360
1361 if (_heapindex == block)
1362 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
1363 else
1364 {
1365 void *previous
1366 = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
1367 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
1368 _free_internal_nolock (previous);
1369 }
1370 goto out;
1371 }
1372 if (ptr != result)
1373 memmove (result, ptr, blocks * BLOCKSIZE);
1374 }
1375 break;
1376
1377 default:
1378
1379
1380 if (size > (size_t) (1 << (type - 1)) &&
1381 size <= (size_t) (1 << type))
1382
1383 result = ptr;
1384 else
1385 {
1386
1387
1388 result = _malloc_internal_nolock (size);
1389 if (result == NULL)
1390 goto out;
1391 memcpy (result, ptr, min (size, (size_t) 1 << type));
1392 _free_internal_nolock (ptr);
1393 }
1394 break;
1395 }
1396
1397 PROTECT_MALLOC_STATE (1);
1398 out:
1399 return result;
1400 }
1401
1402 void *
1403 _realloc_internal (void *ptr, size_t size)
1404 {
1405 void *result;
1406
1407 LOCK ();
1408 result = _realloc_internal_nolock (ptr, size);
1409 UNLOCK ();
1410
1411 return result;
1412 }
1413
1414 void *
1415 realloc (void *ptr, size_t size)
1416 {
1417 void *(*hook) (void *, size_t);
1418
1419 if (!__malloc_initialized && !__malloc_initialize ())
1420 return NULL;
1421
1422 hook = grealloc_hook;
1423 return (hook ? hook : _realloc_internal) (ptr, size);
1424 }
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445 void *
1446 calloc (size_t nmemb, size_t size)
1447 {
1448 void *result;
1449 size_t bytes = nmemb * size;
1450
1451 if (size != 0 && bytes / size != nmemb)
1452 {
1453 errno = ENOMEM;
1454 return NULL;
1455 }
1456
1457 result = malloc (bytes);
1458 if (result)
1459 return memset (result, 0, bytes);
1460 return result;
1461 }
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
1481 #define __sbrk sbrk
1482 #else
1483
1484
1485
1486 extern void *__sbrk (ptrdiff_t increment);
1487 #endif
1488
1489
1490
1491
1492 static void *
1493 gdefault_morecore (ptrdiff_t increment)
1494 {
1495 #ifdef HYBRID_MALLOC
1496 if (!definitely_will_not_unexec_p ())
1497 {
1498 return bss_sbrk (increment);
1499 }
1500 #endif
1501 #ifdef HAVE_SBRK
1502 void *result = (void *) __sbrk (increment);
1503 if (result != (void *) -1)
1504 return result;
1505 #endif
1506 return NULL;
1507 }
1508
1509 void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526 void *
1527 aligned_alloc (size_t alignment, size_t size)
1528 {
1529 void *result;
1530 size_t adj, lastadj;
1531
1532
1533
1534 if (- size < alignment)
1535 {
1536 errno = ENOMEM;
1537 return NULL;
1538 }
1539 result = malloc (size + alignment - 1);
1540 if (result == NULL)
1541 return NULL;
1542
1543
1544
1545 adj = alignment - (uintptr_t) result % alignment;
1546 if (adj == alignment)
1547 adj = 0;
1548
1549 if (adj != alignment - 1)
1550 {
1551 do
1552 {
1553
1554
1555 free (result);
1556 result = malloc (size + adj);
1557 if (result == NULL)
1558 return NULL;
1559
1560 lastadj = adj;
1561 adj = alignment - (uintptr_t) result % alignment;
1562 if (adj == alignment)
1563 adj = 0;
1564
1565
1566
1567
1568
1569 } while (adj > lastadj);
1570 }
1571
1572 if (adj != 0)
1573 {
1574
1575
1576
1577
1578 struct alignlist *l;
1579 LOCK_ALIGNED_BLOCKS ();
1580 for (l = _aligned_blocks; l != NULL; l = l->next)
1581 if (l->aligned == NULL)
1582
1583 break;
1584 if (l == NULL)
1585 {
1586 l = malloc (sizeof *l);
1587 if (l != NULL)
1588 {
1589 l->next = _aligned_blocks;
1590 _aligned_blocks = l;
1591 }
1592 }
1593 if (l != NULL)
1594 {
1595 l->exact = result;
1596 result = l->aligned = (char *) result + adj;
1597 }
1598 UNLOCK_ALIGNED_BLOCKS ();
1599 if (l == NULL)
1600 {
1601 free (result);
1602 result = NULL;
1603 }
1604 }
1605
1606 return result;
1607 }
1608
1609
1610 #ifndef HYBRID_MALLOC
1611
1612
1613
1614 void *
1615 memalign (size_t alignment, size_t size)
1616 {
1617 return aligned_alloc (alignment, size);
1618 }
1619
1620
1621
1622 int
1623 posix_memalign (void **memptr, size_t alignment, size_t size)
1624 {
1625 void *mem;
1626
1627 if (alignment == 0
1628 || alignment % sizeof (void *) != 0
1629 || (alignment & (alignment - 1)) != 0)
1630 return EINVAL;
1631
1632 mem = aligned_alloc (alignment, size);
1633 if (mem == NULL)
1634 return ENOMEM;
1635
1636 *memptr = mem;
1637
1638 return 0;
1639 }
1640 #endif
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 #ifndef HYBRID_MALLOC
1662
1663 # ifndef HAVE_MALLOC_H
1664
1665 extern void *valloc (size_t);
1666 # endif
1667
1668 # if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
1669 # include "getpagesize.h"
1670 # elif !defined getpagesize
1671 extern int getpagesize (void);
1672 # endif
1673
1674 static size_t pagesize;
1675
1676 void *
1677 valloc (size_t size)
1678 {
1679 if (pagesize == 0)
1680 pagesize = getpagesize ();
1681
1682 return aligned_alloc (pagesize, size);
1683 }
1684 #endif
1685
1686 #undef malloc
1687 #undef realloc
1688 #undef calloc
1689 #undef aligned_alloc
1690 #undef free
1691
1692 #ifdef HYBRID_MALLOC
1693
1694
1695
1696
1697
1698
1699
1700 static bool
1701 allocated_via_gmalloc (void *ptr)
1702 {
1703 if (!__malloc_initialized)
1704 return false;
1705 size_t block = BLOCK (ptr);
1706 size_t blockmax = _heaplimit - 1;
1707 return block <= blockmax && _heapinfo[block].busy.type != 0;
1708 }
1709
1710
1711
1712
1713 void *
1714 hybrid_malloc (size_t size)
1715 {
1716 if (definitely_will_not_unexec_p ())
1717 return malloc (size);
1718 return gmalloc (size);
1719 }
1720
1721 void *
1722 hybrid_calloc (size_t nmemb, size_t size)
1723 {
1724 if (definitely_will_not_unexec_p ())
1725 return calloc (nmemb, size);
1726 return gcalloc (nmemb, size);
1727 }
1728
1729 static void
1730 hybrid_free_1 (void *ptr)
1731 {
1732 if (allocated_via_gmalloc (ptr))
1733 gfree (ptr);
1734 else
1735 free (ptr);
1736 }
1737
1738 void
1739 hybrid_free (void *ptr)
1740 {
1741
1742 #if defined __GNUC__ && !defined __clang__
1743 int err[2];
1744 err[0] = errno;
1745 err[1] = errno;
1746 errno = 0;
1747 hybrid_free_1 (ptr);
1748 errno = err[errno == 0];
1749 #else
1750 int err = errno;
1751 hybrid_free_1 (ptr);
1752 errno = err;
1753 #endif
1754 }
1755
1756 #if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
1757 void *
1758 hybrid_aligned_alloc (size_t alignment, size_t size)
1759 {
1760 if (!definitely_will_not_unexec_p ())
1761 return galigned_alloc (alignment, size);
1762
1763 #ifdef HAVE_ALIGNED_ALLOC
1764 return aligned_alloc (alignment, size);
1765 #else
1766 void *p;
1767 return posix_memalign (&p, alignment, size) == 0 ? p : 0;
1768 #endif
1769 }
1770 #endif
1771
1772 void *
1773 hybrid_realloc (void *ptr, size_t size)
1774 {
1775 void *result;
1776 int type;
1777 size_t block, oldsize;
1778
1779 if (!ptr)
1780 return hybrid_malloc (size);
1781 if (!allocated_via_gmalloc (ptr))
1782 return realloc (ptr, size);
1783 if (!definitely_will_not_unexec_p ())
1784 return grealloc (ptr, size);
1785
1786
1787
1788
1789 block = BLOCK (ptr);
1790 type = _heapinfo[block].busy.type;
1791 oldsize =
1792 type < 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
1793 : (size_t) 1 << type;
1794 result = malloc (size);
1795 if (result)
1796 return memcpy (result, ptr, min (oldsize, size));
1797 return result;
1798 }
1799
1800 #else
1801
1802 void *
1803 malloc (size_t size)
1804 {
1805 return gmalloc (size);
1806 }
1807
1808 void *
1809 calloc (size_t nmemb, size_t size)
1810 {
1811 return gcalloc (nmemb, size);
1812 }
1813
1814 void
1815 free (void *ptr)
1816 {
1817 gfree (ptr);
1818 }
1819
1820 void *
1821 aligned_alloc (size_t alignment, size_t size)
1822 {
1823 return galigned_alloc (alignment, size);
1824 }
1825
1826 void *
1827 realloc (void *ptr, size_t size)
1828 {
1829 return grealloc (ptr, size);
1830 }
1831
1832 #endif
1833
1834 #ifdef GC_MCHECK
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856 #include <stdio.h>
1857
1858
1859 static void (*old_free_hook) (void *ptr);
1860 static void *(*old_malloc_hook) (size_t size);
1861 static void *(*old_realloc_hook) (void *ptr, size_t size);
1862
1863
1864 static void (*abortfunc) (enum mcheck_status);
1865
1866
1867 #define MAGICWORD (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
1868 #define MAGICFREE (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
1869 #define MAGICBYTE ((char) 0xd7)
1870 #define MALLOCFLOOD ((char) 0x93)
1871 #define FREEFLOOD ((char) 0x95)
1872
1873 struct hdr
1874 {
1875 size_t size;
1876 size_t magic;
1877 };
1878
1879 static enum mcheck_status
1880 checkhdr (const struct hdr *hdr)
1881 {
1882 enum mcheck_status status;
1883 switch (hdr->magic)
1884 {
1885 default:
1886 status = MCHECK_HEAD;
1887 break;
1888 case MAGICFREE:
1889 status = MCHECK_FREE;
1890 break;
1891 case MAGICWORD:
1892 if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
1893 status = MCHECK_TAIL;
1894 else
1895 status = MCHECK_OK;
1896 break;
1897 }
1898 if (status != MCHECK_OK)
1899 (*abortfunc) (status);
1900 return status;
1901 }
1902
1903 static void
1904 freehook (void *ptr)
1905 {
1906 struct hdr *hdr;
1907
1908 if (ptr)
1909 {
1910 struct alignlist *l;
1911
1912
1913
1914 PROTECT_MALLOC_STATE (0);
1915 LOCK_ALIGNED_BLOCKS ();
1916 for (l = _aligned_blocks; l != NULL; l = l->next)
1917 if (l->aligned == ptr)
1918 {
1919 l->aligned = NULL;
1920 ptr = l->exact;
1921 break;
1922 }
1923 UNLOCK_ALIGNED_BLOCKS ();
1924 PROTECT_MALLOC_STATE (1);
1925
1926 hdr = ((struct hdr *) ptr) - 1;
1927 checkhdr (hdr);
1928 hdr->magic = MAGICFREE;
1929 memset (ptr, FREEFLOOD, hdr->size);
1930 }
1931 else
1932 hdr = NULL;
1933
1934 gfree_hook = old_free_hook;
1935 free (hdr);
1936 gfree_hook = freehook;
1937 }
1938
1939 static void *
1940 mallochook (size_t size)
1941 {
1942 struct hdr *hdr;
1943
1944 gmalloc_hook = old_malloc_hook;
1945 hdr = malloc (sizeof *hdr + size + 1);
1946 gmalloc_hook = mallochook;
1947 if (hdr == NULL)
1948 return NULL;
1949
1950 hdr->size = size;
1951 hdr->magic = MAGICWORD;
1952 ((char *) &hdr[1])[size] = MAGICBYTE;
1953 return memset (hdr + 1, MALLOCFLOOD, size);
1954 }
1955
1956 static void *
1957 reallochook (void *ptr, size_t size)
1958 {
1959 struct hdr *hdr = NULL;
1960 size_t osize = 0;
1961
1962 if (ptr)
1963 {
1964 hdr = ((struct hdr *) ptr) - 1;
1965 osize = hdr->size;
1966
1967 checkhdr (hdr);
1968 if (size < osize)
1969 memset ((char *) ptr + size, FREEFLOOD, osize - size);
1970 }
1971
1972 gfree_hook = old_free_hook;
1973 gmalloc_hook = old_malloc_hook;
1974 grealloc_hook = old_realloc_hook;
1975 hdr = realloc (hdr, sizeof *hdr + size + 1);
1976 gfree_hook = freehook;
1977 gmalloc_hook = mallochook;
1978 grealloc_hook = reallochook;
1979 if (hdr == NULL)
1980 return NULL;
1981
1982 hdr->size = size;
1983 hdr->magic = MAGICWORD;
1984 ((char *) &hdr[1])[size] = MAGICBYTE;
1985 if (size > osize)
1986 memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
1987 return hdr + 1;
1988 }
1989
1990 static void
1991 mabort (enum mcheck_status status)
1992 {
1993 const char *msg;
1994 switch (status)
1995 {
1996 case MCHECK_OK:
1997 msg = "memory is consistent, library is buggy";
1998 break;
1999 case MCHECK_HEAD:
2000 msg = "memory clobbered before allocated block";
2001 break;
2002 case MCHECK_TAIL:
2003 msg = "memory clobbered past end of allocated block";
2004 break;
2005 case MCHECK_FREE:
2006 msg = "block freed twice";
2007 break;
2008 default:
2009 msg = "bogus mcheck_status, library is buggy";
2010 break;
2011 }
2012 #ifdef __GNU_LIBRARY__
2013 __libc_fatal (msg);
2014 #else
2015 fprintf (stderr, "mcheck: %s\n", msg);
2016 emacs_abort ();
2017 #endif
2018 }
2019
2020 static int mcheck_used = 0;
2021
2022 int
2023 mcheck (void (*func) (enum mcheck_status))
2024 {
2025 abortfunc = (func != NULL) ? func : &mabort;
2026
2027
2028 if (!__malloc_initialized && !mcheck_used)
2029 {
2030 old_free_hook = gfree_hook;
2031 gfree_hook = freehook;
2032 old_malloc_hook = gmalloc_hook;
2033 gmalloc_hook = mallochook;
2034 old_realloc_hook = grealloc_hook;
2035 grealloc_hook = reallochook;
2036 mcheck_used = 1;
2037 }
2038
2039 return mcheck_used ? 0 : -1;
2040 }
2041
2042 enum mcheck_status
2043 mprobe (void *ptr)
2044 {
2045 return mcheck_used ? checkhdr (ptr) : MCHECK_DISABLED;
2046 }
2047
2048 #endif