root/src/gmalloc.c


DEFINITIONS

This source file includes the following definitions.
  1. protect_malloc_state
  2. align
  3. get_contiguous_space
  4. register_heapinfo
  5. malloc_atfork_handler_prepare
  6. malloc_atfork_handler_parent
  7. malloc_atfork_handler_child
  8. malloc_enable_thread
  9. malloc_initialize_1
  10. __malloc_initialize
  11. morecore_nolock
  12. _malloc_internal_nolock
  13. _malloc_internal
  14. malloc
  15. _malloc
  16. _free
  17. _realloc
  18. _free_internal_nolock
  19. _free_internal
  20. free
  21. weak_alias
  22. _realloc_internal_nolock
  23. _realloc_internal
  24. realloc
  25. calloc
  26. gdefault_morecore
  27. aligned_alloc
  28. memalign
  29. posix_memalign
  30. valloc
  31. allocated_via_gmalloc
  32. hybrid_malloc
  33. hybrid_calloc
  34. hybrid_free_1
  35. hybrid_free
  36. hybrid_aligned_alloc
  37. hybrid_realloc
  38. malloc
  39. calloc
  40. free
  41. aligned_alloc
  42. realloc
  43. checkhdr
  44. freehook
  45. mallochook
  46. reallochook
  47. mabort
  48. mcheck
  49. mprobe

     1 /* Declarations for `malloc' and friends.
     2    Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2023 Free
     3    Software Foundation, Inc.
     4                   Written May 1989 by Mike Haertel.
     5 
     6 This library is free software; you can redistribute it and/or
     7 modify it under the terms of the GNU General Public License as
     8 published by the Free Software Foundation; either version 2 of the
     9 License, or (at your option) any later version.
    10 
    11 This library is distributed in the hope that it will be useful,
    12 but WITHOUT ANY WARRANTY; without even the implied warranty of
    13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    14 General Public License for more details.
    15 
    16 You should have received a copy of the GNU General Public
    17 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
    18 
    19    The author may be reached (Email) at the address mike@ai.mit.edu,
    20    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
    21 
    22 #include <config.h>
    23 
    24 #if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
    25 #define USE_PTHREAD
    26 #endif
    27 
    28 #include <stddef.h>
    29 #include <stdlib.h>
    30 #include <string.h>
    31 #include <limits.h>
    32 #include <stdint.h>
    33 #include <unistd.h>
    34 
    35 #ifdef USE_PTHREAD
    36 #include <pthread.h>
    37 #endif
    38 
    39 #include "lisp.h"
    40 
    41 #ifdef HAVE_MALLOC_H
    42 # if GNUC_PREREQ (4, 2, 0)
    43 #  pragma GCC diagnostic ignored "-Wdeprecated-declarations"
    44 # endif
    45 # include <malloc.h>
    46 #endif
    47 #ifndef __MALLOC_HOOK_VOLATILE
    48 # define __MALLOC_HOOK_VOLATILE volatile
    49 #endif
    50 #ifndef HAVE_MALLOC_H
    51 extern void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
    52 extern void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
    53 extern void *(*__morecore) (ptrdiff_t);
    54 #endif
    55 
    56 /* If HYBRID_MALLOC is defined, then temacs will use malloc,
    57    realloc... as defined in this file (and renamed gmalloc,
    58    grealloc... via the macros that follow).  The dumped emacs,
    59    however, will use the system malloc, realloc....  In other source
    60    files, malloc, realloc... are renamed hybrid_malloc,
    61    hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
    62    friends are wrapper functions defined later in this file.  */
    63 #undef malloc
    64 #undef realloc
    65 #undef calloc
    66 #undef aligned_alloc
    67 #undef free
    68 #define malloc gmalloc
    69 #define realloc grealloc
    70 #define calloc gcalloc
    71 #define aligned_alloc galigned_alloc
    72 #define free gfree
    73 #define malloc_info gmalloc_info
    74 
    75 #ifdef HYBRID_MALLOC
    76 # include "sheap.h"
    77 #endif
    78 
    79 #ifdef  __cplusplus
    80 extern "C"
    81 {
    82 #endif
    83 
    84 #ifdef HYBRID_MALLOC
    85 #define extern static
    86 #endif
    87 
    88 /* Allocate SIZE bytes of memory.  */
    89 extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
    90 /* Re-allocate the previously allocated block
    91    in ptr, making the new block SIZE bytes long.  */
    92 extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
    93 /* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
    94 extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
    95 /* Free a block.  */
    96 extern void free (void *ptr);
    97 
    99 /* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
    99 extern void *aligned_alloc (size_t, size_t);
   100 #ifdef MSDOS
   101 extern void *memalign (size_t, size_t);
   102 extern int posix_memalign (void **, size_t, size_t);
   103 #endif
   104 
   105 /* The allocator divides the heap into blocks of fixed size; large
   106    requests receive one or more whole blocks, and small requests
   107    receive a fragment of a block.  Fragment sizes are powers of two,
   108    and all fragments of a block are the same size.  When all the
   109    fragments in a block have been freed, the block itself is freed.  */
   110 #define BLOCKLOG        (INT_WIDTH > 16 ? 12 : 9)
   111 #define BLOCKSIZE       (1 << BLOCKLOG)
   112 #define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
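
/* A minimal worked example of these macros, assuming INT_WIDTH > 16
   (so BLOCKLOG is 12 and BLOCKSIZE is 4096).  The example_ name is
   illustrative, not part of this file.  */
#if 0 /* illustration only */
#include <assert.h>
static void
example_block_macros (void)
{
  assert (BLOCKIFY (1) == 1);        /* 1..4096 bytes fit in one block.  */
  assert (BLOCKIFY (4096) == 1);
  assert (BLOCKIFY (4097) == 2);     /* Rounds up to whole blocks.  */
  assert (BLOCKIFY (10000) == 3);
  /* Requests of BLOCKSIZE / 2 bytes or less are served from
     power-of-two fragments instead of whole blocks.  */
}
#endif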
   113 
   114 /* Determine the amount of memory spanned by the initial heap table
   115    (not an absolute limit).  */
   116 #define HEAP            (INT_WIDTH > 16 ? 4194304 : 65536)
   117 
   118 /* Number of contiguous free blocks allowed to build up at the end of
   119    memory before they will be returned to the system.  */
   120 #define FINAL_FREE_BLOCKS       8
   121 
   122 /* Data structure giving per-block information.  */
   123 typedef union
   124   {
   125     /* Heap information for a busy block.  */
   126     struct
   127       {
   128         /* Zero for a block that is not one of ours (typically,
   129            allocated by system malloc), positive for the log base 2 of
   130            the fragment size of a fragmented block, -1 for the first
   131            block of a multiblock object, and unspecified for later
   132            blocks of that object.  Type-0 blocks can be present
   133            because the system malloc can be invoked by library
   134            functions in an undumped Emacs.  */
   135         int type;
   136         union
   137           {
   138             struct
   139               {
   140                 size_t nfree; /* Free frags in a fragmented block.  */
   141                 size_t first; /* First free fragment of the block.  */
   142               } frag;
   143             /* For a large object, in its first block, this has the number
   144                of blocks in the object.  */
   145             ptrdiff_t size;
   146           } info;
   147       } busy;
   148     /* Heap information for a free block
   149        (that may be the first of a free cluster).  */
   150     struct
   151       {
   152         size_t size;    /* Size (in blocks) of a free cluster.  */
   153         size_t next;    /* Index of next free cluster.  */
   154         size_t prev;    /* Index of previous free cluster.  */
   155       } free;
   156   } malloc_info;
   157 
   158 /* Pointer to first block of the heap.  */
   159 extern char *_heapbase;
   160 
   161 /* Table indexed by block number giving per-block information.  */
   162 extern malloc_info *_heapinfo;
   163 
   164 /* Address to block number and vice versa.  */
   165 #define BLOCK(A)        ((size_t) ((char *) (A) - _heapbase) / BLOCKSIZE + 1)
   166 #define ADDRESS(B)      ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
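
/* A sketch of the intended round trip, assuming _heapbase has been
   set up; the helper name is illustrative.  BLOCK yields a 1-based
   index, so ADDRESS (BLOCK (p)) is the start of the block holding P.  */
#if 0 /* illustration only */
static void
example_block_roundtrip (void *p)
{
  size_t b = BLOCK (p);          /* 1-based index into _heapinfo.  */
  char *base = ADDRESS (b);      /* First byte of that block.  */
  /* base <= (char *) p && (char *) p < base + BLOCKSIZE holds for
     any P inside the heap.  */
}
#endif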
   167 
   168 /* Current search index for the heap table.  */
   169 extern size_t _heapindex;
   170 
   171 /* Limit of valid info table indices.  */
   172 extern size_t _heaplimit;
   173 
   174 /* Doubly linked lists of free fragments.  */
   175 struct list
   176   {
   177     struct list *next;
   178     struct list *prev;
   179   };
   180 
   181 /* Free list headers for each fragment size.  */
   182 static struct list _fraghead[BLOCKLOG];
   183 
   184 /* List of blocks allocated with aligned_alloc and friends.  */
   185 struct alignlist
   186   {
   187     struct alignlist *next;
   188     void *aligned;              /* The address that aligned_alloc returned.  */
   189     void *exact;                /* The address that malloc returned.  */
   190   };
   191 extern struct alignlist *_aligned_blocks;
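
/* A sketch of how free consults this list, mirroring the loop in
   _free_internal_nolock later in this file; the function name is
   illustrative and locking is omitted for brevity.  */
#if 0 /* illustration only */
static void *
example_aligned_to_exact (void *aligned)
{
  struct alignlist *l;
  for (l = _aligned_blocks; l != NULL; l = l->next)
    if (l->aligned == aligned)
      {
        l->aligned = NULL;   /* Mark the slot free for reuse.  */
        return l->exact;     /* The address malloc actually returned.  */
      }
  return aligned;            /* Not an aligned_alloc block.  */
}
#endif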
   192 
   193 /* Instrumentation.  */
   194 extern size_t _chunks_used;
   195 extern size_t _bytes_used;
   196 extern size_t _chunks_free;
   197 extern size_t _bytes_free;
   198 
   199 /* Internal versions of `malloc', `realloc', and `free'
   200    used when these functions need to call each other.
   201    They are the same but don't call the hooks.  */
   202 extern void *_malloc_internal (size_t);
   203 extern void *_realloc_internal (void *, size_t);
   204 extern void _free_internal (void *);
   205 extern void *_malloc_internal_nolock (size_t);
   206 extern void *_realloc_internal_nolock (void *, size_t);
   207 extern void _free_internal_nolock (void *);
   208 
   209 #ifdef USE_PTHREAD
   210 extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
   211 extern int _malloc_thread_enabled_p;
   212 #define LOCK()                                  \
   213   do {                                          \
   214     if (_malloc_thread_enabled_p)               \
   215       pthread_mutex_lock (&_malloc_mutex);      \
   216   } while (0)
   217 #define UNLOCK()                                \
   218   do {                                          \
   219     if (_malloc_thread_enabled_p)               \
   220       pthread_mutex_unlock (&_malloc_mutex);    \
   221   } while (0)
   222 #define LOCK_ALIGNED_BLOCKS()                           \
   223   do {                                                  \
   224     if (_malloc_thread_enabled_p)                       \
   225       pthread_mutex_lock (&_aligned_blocks_mutex);      \
   226   } while (0)
   227 #define UNLOCK_ALIGNED_BLOCKS()                         \
   228   do {                                                  \
   229     if (_malloc_thread_enabled_p)                       \
   230       pthread_mutex_unlock (&_aligned_blocks_mutex);    \
   231   } while (0)
   232 #else
   233 #define LOCK()
   234 #define UNLOCK()
   235 #define LOCK_ALIGNED_BLOCKS()
   236 #define UNLOCK_ALIGNED_BLOCKS()
   237 #endif
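
/* A sketch of the locking protocol: each entry point brackets its
   heap-state access with these macros, which stay no-ops until
   malloc_enable_thread has run.  Illustration only.  */
#if 0 /* illustration only */
static void
example_critical_section (void)
{
  LOCK ();
  /* ... read or modify _heapinfo, _heapindex, _fraghead ... */
  UNLOCK ();
}
#endif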
   238 
   239 /* Nonzero if `malloc' has been called and done its initialization.  */
   240 extern int __malloc_initialized;
   241 /* Function called to initialize malloc data structures.  */
   242 extern int __malloc_initialize (void);
   243 
   244 #ifdef GC_MCHECK
   245 
   246 /* Return values for `mprobe': these are the kinds of inconsistencies that
   247    `mcheck' enables detection of.  */
   248 enum mcheck_status
   249   {
   250     MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
   251     MCHECK_OK,                  /* Block is fine.  */
   252     MCHECK_FREE,                /* Block freed twice.  */
   253     MCHECK_HEAD,                /* Memory before the block was clobbered.  */
   254     MCHECK_TAIL                 /* Memory after the block was clobbered.  */
   255   };
   256 
   257 /* Activate a standard collection of debugging hooks.  This must be called
   258    before `malloc' is ever called.  ABORTFUNC is called with an error code
   259    (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   260    null, the standard function prints on stderr and then calls `abort'.  */
   261 extern int mcheck (void (*abortfunc) (enum mcheck_status));
   262 
   263 /* Check for aberrations in a particular malloc'd block.  You must have
   264    called `mcheck' already.  These are the same checks that `mcheck' does
   265    when you free or reallocate a block.  */
   266 extern enum mcheck_status mprobe (void *ptr);
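
/* A usage sketch, assuming GC_MCHECK: mcheck must be installed before
   the first allocation, after which mprobe classifies a live block.
   The function name is illustrative.  */
#if 0 /* illustration only */
static void
example_mcheck (void)
{
  mcheck (NULL);                /* Default handler prints and aborts.  */
  char *p = malloc (16);
  if (mprobe (p) != MCHECK_OK)  /* Head/tail clobber or double free.  */
    abort ();
  free (p);
}
#endif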
   267 
   268 /* Activate a standard collection of tracing hooks.  */
   269 extern void mtrace (void);
   270 extern void muntrace (void);
   271 
   272 /* Statistics available to the user.  */
   273 struct mstats
   274   {
   275     size_t bytes_total; /* Total size of the heap. */
   276     size_t chunks_used; /* Chunks allocated by the user. */
   277     size_t bytes_used;  /* Byte total of user-allocated chunks. */
   278     size_t chunks_free; /* Chunks in the free list. */
   279     size_t bytes_free;  /* Byte total of chunks in the free list. */
   280   };
   281 
   282 /* Pick up the current statistics. */
   283 extern struct mstats mstats (void);
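
/* A usage sketch for the statistics interface; <stdio.h> is assumed
   for the example only.  */
#if 0 /* illustration only */
#include <stdio.h>
static void
example_print_mstats (void)
{
  struct mstats m = mstats ();
  printf ("heap: %zu bytes total, %zu/%zu chunks used/free\n",
          m.bytes_total, m.chunks_used, m.chunks_free);
}
#endif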
   284 
   285 #endif
   286 
   287 #undef extern
   288 
   289 #ifdef  __cplusplus
   290 }
   291 #endif
   292 
   293 /* Memory allocator `malloc'.
   294    Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
   295                   Written May 1989 by Mike Haertel.
   296 
   297 This library is free software; you can redistribute it and/or
   298 modify it under the terms of the GNU General Public License as
   299 published by the Free Software Foundation; either version 2 of the
   300 License, or (at your option) any later version.
   301 
   302 This library is distributed in the hope that it will be useful,
   303 but WITHOUT ANY WARRANTY; without even the implied warranty of
   304 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   305 General Public License for more details.
   306 
   307 You should have received a copy of the GNU General Public
   308 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
   309 
   310    The author may be reached (Email) at the address mike@ai.mit.edu,
   311    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
   312 
   313 #include <errno.h>
   314 
   315 /* Debugging hook for 'malloc'.  */
   316 static void *(*__MALLOC_HOOK_VOLATILE gmalloc_hook) (size_t);
   317 
   318 /* Replacements for traditional glibc malloc hooks, for platforms that
   319    do not already have these hooks.  Platforms with these hooks all
   320    used relaxed ref/def, so it is OK to define them here too.  */
   321 void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void);
   322 void (*__MALLOC_HOOK_VOLATILE __after_morecore_hook) (void);
   323 void *(*__morecore) (ptrdiff_t);
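
/* A sketch of the classic hook protocol these variables support: a
   tracer saves the current hook, installs itself, and disarms itself
   around the real call to avoid recursion.  The example_ names are
   illustrative; the mallochook/freehook functions listed in the index
   above follow the same pattern.  */
#if 0 /* illustration only */
static void *(*example_old_hook) (size_t);

static void *
example_tracing_malloc (size_t size)
{
  gmalloc_hook = example_old_hook;        /* Disarm to avoid recursion.  */
  void *p = malloc (size);
  example_old_hook = gmalloc_hook;        /* Keep any nested change.  */
  gmalloc_hook = example_tracing_malloc;  /* Re-arm.  */
  return p;
}

static void
example_install_trace (void)
{
  example_old_hook = gmalloc_hook;
  gmalloc_hook = example_tracing_malloc;
}
#endif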
   324 
   325 #ifndef HYBRID_MALLOC
   326 
   327 /* Pointer to the base of the first block.  */
   328 char *_heapbase;
   329 
   330 /* Block information table.  Allocated with align/__free (not malloc/free).  */
   331 malloc_info *_heapinfo;
   332 
   333 /* Search index in the info table.  */
   334 size_t _heapindex;
   335 
   336 /* Limit of valid info table indices.  */
   337 size_t _heaplimit;
   338 
   339 /* Instrumentation.  */
   340 size_t _chunks_used;
   341 size_t _bytes_used;
   342 size_t _chunks_free;
   343 size_t _bytes_free;
   344 
   345 /* Nonzero once malloc has done its initialization.  */
   346 int __malloc_initialized;
   347 
   348 #endif /* HYBRID_MALLOC */
   349 
   350 /* Number of extra blocks to get each time we ask for more core.
   351    This reduces the frequency of calling `(*__morecore)'.  */
   352 #if defined DOUG_LEA_MALLOC || defined HYBRID_MALLOC || defined SYSTEM_MALLOC
   353 static
   354 #endif
   355 size_t __malloc_extra_blocks;
   356 
   357 /* Number of info entries.  */
   358 static size_t heapsize;
   359 
   360 #if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE
   361 
   362 /* Some code for hunting a bug that writes into _heapinfo.
   363 
   364    Call the PROTECT_MALLOC_STATE macro below with a nonzero argument
   365    to protect the internal malloc state against writes, and with a
   366    zero argument to make it readable and writable again.
   367 
   368    Note that this only works if BLOCKSIZE == page size, which is
   369    the case on the i386.  */
   370 
   371 #include <sys/types.h>
   372 #include <sys/mman.h>
   373 
   374 static int state_protected_p;
   375 static size_t last_state_size;
   376 static malloc_info *last_heapinfo;
   377 
   378 void
   379 protect_malloc_state (int protect_p)
   380 {
   381   /* If _heapinfo has been relocated, make sure its old location
   382      isn't left read-only; it will be reused by malloc.  */
   383   if (_heapinfo != last_heapinfo
   384       && last_heapinfo
   385       && state_protected_p)
   386     mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);
   387 
   388   last_state_size = _heaplimit * sizeof *_heapinfo;
   389   last_heapinfo   = _heapinfo;
   390 
   391   if (protect_p != state_protected_p)
   392     {
   393       state_protected_p = protect_p;
   394       if (mprotect (_heapinfo, last_state_size,
   395                     protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
   396         abort ();
   397     }
   398 }
   399 
   400 #define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)
   401 
   402 #else
   403 #define PROTECT_MALLOC_STATE(PROT)      /* empty */
   404 #endif
   405 
   406 
   407 /* Aligned allocation.  */
   408 static void *
   409 align (size_t size)
   410 {
   411   void *result;
   412   ptrdiff_t adj;
   413 
   414   /* align accepts an unsigned argument, but __morecore accepts a
   415      signed one.  This could lead to trouble if SIZE overflows the
   416      ptrdiff_t type accepted by __morecore.  We just punt in that
   417      case, since they are requesting a ludicrous amount anyway.  */
   418   if (PTRDIFF_MAX < size)
   419     result = 0;
   420   else
   421     result = (*__morecore) (size);
   422   adj = (uintptr_t) result % BLOCKSIZE;
   423   if (adj != 0)
   424     {
   425       adj = BLOCKSIZE - adj;
   426       (*__morecore) (adj);
   427       result = (char *) result + adj;
   428     }
   429 
   430   if (__after_morecore_hook)
   431     (*__after_morecore_hook) ();
   432 
   433   return result;
   434 }
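
/* Worked example of the adjustment above (values illustrative): if
   __morecore returns 0x100010 and BLOCKSIZE is 4096 (0x1000), then
   adj = 0x100010 % 0x1000 = 0x10, so align requests another
   0x1000 - 0x10 = 0xff0 bytes and returns 0x100010 + 0xff0 =
   0x101000, the next block boundary.  */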
   435 
   436 /* Get SIZE bytes, if we can get them starting at END.
   437    Return the address of the space we got.
   438    If we cannot get space at END, fail and return 0.  */
   439 static void *
   440 get_contiguous_space (ptrdiff_t size, void *position)
   441 {
   442   void *before;
   443   void *after;
   444 
   445   before = (*__morecore) (0);
   446   /* If we can tell in advance that the break is at the wrong place,
   447      fail now.  */
   448   if (before != position)
   449     return 0;
   450 
   451   /* Allocate SIZE bytes and get the address of them.  */
   452   after = (*__morecore) (size);
   453   if (!after)
   454     return 0;
   455 
   456   /* It was not contiguous--reject it.  */
   457   if (after != position)
   458     {
   459       (*__morecore) (- size);
   460       return 0;
   461     }
   462 
   463   return after;
   464 }
   465 
   466 
   467 /* This is called when `_heapinfo' and `heapsize' have just
   468    been set to describe a new info table.  Set up the table
   469    to describe itself and account for it in the statistics.  */
   470 static void
   471 register_heapinfo (void)
   472 {
   473   size_t block, blocks;
   474 
   475   block = BLOCK (_heapinfo);
   476   blocks = BLOCKIFY (heapsize * sizeof (malloc_info));
   477 
   478   /* Account for the _heapinfo block itself in the statistics.  */
   479   _bytes_used += blocks * BLOCKSIZE;
   480   ++_chunks_used;
   481 
   482   /* Describe the heapinfo block itself in the heapinfo.  */
   483   _heapinfo[block].busy.type = -1;
   484   _heapinfo[block].busy.info.size = blocks;
   485 }
   486 
   487 #ifdef USE_PTHREAD
   488 pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
   489 pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
   490 int _malloc_thread_enabled_p;
   491 
   492 static void
   493 malloc_atfork_handler_prepare (void)
   494 {
   495   LOCK ();
   496   LOCK_ALIGNED_BLOCKS ();
   497 }
   498 
   499 static void
   500 malloc_atfork_handler_parent (void)
   501 {
   502   UNLOCK_ALIGNED_BLOCKS ();
   503   UNLOCK ();
   504 }
   505 
   506 static void
   507 malloc_atfork_handler_child (void)
   508 {
   509   UNLOCK_ALIGNED_BLOCKS ();
   510   UNLOCK ();
   511 }
   512 
   513 /* Set up mutexes and make malloc etc. thread-safe.  */
   514 void
   515 malloc_enable_thread (void)
   516 {
   517   if (_malloc_thread_enabled_p)
   518     return;
   519 
   520   /* Some pthread implementations call malloc for statically
   521      initialized mutexes when they are first used.  To avoid such a
   522      situation, we initialize mutexes here while their use is
   523      disabled in malloc etc.  */
   524   pthread_mutex_init (&_malloc_mutex, NULL);
   525   pthread_mutex_init (&_aligned_blocks_mutex, NULL);
   526   pthread_atfork (malloc_atfork_handler_prepare,
   527                   malloc_atfork_handler_parent,
   528                   malloc_atfork_handler_child);
   529   _malloc_thread_enabled_p = 1;
   530 }
   531 #endif  /* USE_PTHREAD */
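
/* A usage sketch: the intended sequence is to initialize the allocator
   while still single-threaded, then enable locking before any other
   threads are created.  Illustration only.  */
#if 0 /* illustration only */
static void
example_startup (void)
{
  __malloc_initialize ();   /* Single-threaded; no locks taken yet.  */
  malloc_enable_thread ();  /* From here on, malloc etc. lock the mutex.  */
  /* ... now safe to create threads that allocate ...  */
}
#endif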
   532 
   533 static void
   534 malloc_initialize_1 (void)
   535 {
   536 #ifdef GC_MCHECK
   537   mcheck (NULL);
   538 #endif
   539 
   540   if (__malloc_initialize_hook)
   541     (*__malloc_initialize_hook) ();
   542 
   543   heapsize = HEAP / BLOCKSIZE;
   544   _heapinfo = align (heapsize * sizeof (malloc_info));
   545   if (_heapinfo == NULL)
   546     return;
   547   memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
   548   _heapinfo[0].free.size = 0;
   549   _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
   550   _heapindex = 0;
   551   _heapbase = (char *) _heapinfo;
   552   _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));
   553 
   554   register_heapinfo ();
   555 
   556   __malloc_initialized = 1;
   557   PROTECT_MALLOC_STATE (1);
   558   return;
   559 }
   560 
   561 /* Set everything up and remember that we have.
   562    main calls malloc, which calls this function, before any threads or
   563    signal handlers have been set up, so we don't need thread protection.  */
   564 int
   565 __malloc_initialize (void)
   566 {
   567   if (__malloc_initialized)
   568     return 0;
   569 
   570   malloc_initialize_1 ();
   571 
   572   return __malloc_initialized;
   573 }
   574 
   575 static int morecore_recursing;
   576 
   577 /* Get neatly aligned memory, initializing or
   578    growing the heap info table as necessary. */
   579 static void *
   580 morecore_nolock (size_t size)
   581 {
   582   void *result;
   583   malloc_info *newinfo, *oldinfo;
   584   size_t newsize;
   585 
   586   if (morecore_recursing)
   587     /* Avoid recursion.  The caller will know how to handle a null return.  */
   588     return NULL;
   589 
   590   result = align (size);
   591   if (result == NULL)
   592     return NULL;
   593 
   594   PROTECT_MALLOC_STATE (0);
   595 
   596   /* Check if we need to grow the info table.  */
   597   if (heapsize < BLOCK ((char *) result + size))
   598     {
   599       /* Calculate the new _heapinfo table size.  We do not account for the
   600          added blocks in the table itself, as we hope to place them in
   601          existing free space, which is already covered by part of the
   602          existing table.  */
   603       newsize = heapsize;
   604       do
   605         newsize *= 2;
   606       while (newsize < BLOCK ((char *) result + size));
   607 
   608       /* We must not reuse existing core for the new info table when called
   609          from realloc in the case of growing a large block, because the
   610          block being grown is momentarily marked as free.  In this case
   611          _heaplimit is zero so we know not to reuse space for internal
   612          allocation.  */
   613       if (_heaplimit != 0)
   614         {
   615           /* First try to allocate the new info table in core we already
   616              have, in the usual way using realloc.  If realloc cannot
   617              extend it in place or relocate it to existing sufficient core,
   618              we will get called again, and the code above will notice the
   619              `morecore_recursing' flag and return null.  */
   620           int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
   621           morecore_recursing = 1;
   622           newinfo = _realloc_internal_nolock (_heapinfo,
   623                                               newsize * sizeof (malloc_info));
   624           morecore_recursing = 0;
   625           if (newinfo == NULL)
   626             errno = save;
   627           else
   628             {
   629               /* We found some space in core, and realloc has put the old
   630                  table's blocks on the free list.  Now zero the new part
   631                  of the table and install the new table location.  */
   632               memset (&newinfo[heapsize], 0,
   633                       (newsize - heapsize) * sizeof (malloc_info));
   634               _heapinfo = newinfo;
   635               heapsize = newsize;
   636               goto got_heap;
   637             }
   638         }
   639 
   640       /* Allocate new space for the malloc info table.  */
   641       while (1)
   642         {
   643           newinfo = align (newsize * sizeof (malloc_info));
   644 
   645           /* Did it fail?  */
   646           if (newinfo == NULL)
   647             {
   648               (*__morecore) (-size);
   649               return NULL;
   650             }
   651 
   652           /* Is it big enough to record status for its own space?
   653              If so, we win.  */
   654           if (BLOCK ((char *) newinfo + newsize * sizeof (malloc_info))
   655               < newsize)
   656             break;
   657 
   658           /* Must try again.  First give back most of what we just got.  */
   659           (*__morecore) (- newsize * sizeof (malloc_info));
   660           newsize *= 2;
   661         }
   662 
   663       /* Copy the old table to the beginning of the new,
   664          and zero the rest of the new table.  */
   665       memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
   666       memset (&newinfo[heapsize], 0,
   667               (newsize - heapsize) * sizeof (malloc_info));
   668       oldinfo = _heapinfo;
   669       _heapinfo = newinfo;
   670       heapsize = newsize;
   671 
   672       register_heapinfo ();
   673 
   674       /* Reset _heaplimit so _free_internal never decides
   675          it can relocate or resize the info table.  */
   676       _heaplimit = 0;
   677       _free_internal_nolock (oldinfo);
   678       PROTECT_MALLOC_STATE (0);
   679 
   680       /* The new heap limit includes the new table just allocated.  */
   681       _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
   682       return result;
   683     }
   684 
   685  got_heap:
   686   _heaplimit = BLOCK ((char *) result + size);
   687   return result;
   688 }
   689 
   690 /* Allocate memory from the heap.  */
   691 void *
   692 _malloc_internal_nolock (size_t size)
   693 {
   694   void *result;
   695   size_t block, blocks, lastblocks, start;
   696   register size_t i;
   697   struct list *next;
   698 
   699   /* ANSI C allows `malloc (0)' to either return NULL, or to return a
   700      valid address you can realloc and free (though not dereference).
   701 
   702      It turns out that some extant code (sunrpc, at least Ultrix's version)
   703      expects `malloc (0)' to return non-NULL and breaks otherwise.
   704      Be compatible.  */
   705 
   706 #if     0
   707   if (size == 0)
   708     return NULL;
   709 #endif
   710 
   711   PROTECT_MALLOC_STATE (0);
   712 
   713   if (size < sizeof (struct list))
   714     size = sizeof (struct list);
   715 
   716   /* Determine the allocation policy based on the request size.  */
   717   if (size <= BLOCKSIZE / 2)
   718     {
   719       /* Small allocation to receive a fragment of a block.
   720          Determine the logarithm to base two of the fragment size. */
   721       register size_t log = 1;
   722       --size;
   723       while ((size /= 2) != 0)
   724         ++log;
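          /* Worked example: for size 100, --size leaves 99, and the
             loop raises log to 7 as 99 halves to 49, 24, 12, 6, 3, 1;
             the fragment size 1 << 7 = 128 is the smallest power of
             two that holds the request.  */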
   725 
   726       /* Look in the fragment lists for a
   727          free fragment of the desired size. */
   728       next = _fraghead[log].next;
   729       if (next != NULL)
   730         {
   731           /* There are free fragments of this size.
   732              Pop a fragment out of the fragment list and return it.
   733              Update the block's nfree and first counters. */
   734           result = next;
   735           next->prev->next = next->next;
   736           if (next->next != NULL)
   737             next->next->prev = next->prev;
   738           block = BLOCK (result);
   739           if (--_heapinfo[block].busy.info.frag.nfree != 0)
   740             _heapinfo[block].busy.info.frag.first =
   741               (uintptr_t) next->next % BLOCKSIZE >> log;
   742 
   743           /* Update the statistics.  */
   744           ++_chunks_used;
   745           _bytes_used += 1 << log;
   746           --_chunks_free;
   747           _bytes_free -= 1 << log;
   748         }
   749       else
   750         {
   751           /* No free fragments of the desired size, so get a new block
   752              and break it into fragments, returning the first.  */
   753 #ifdef GC_MALLOC_CHECK
   754           result = _malloc_internal_nolock (BLOCKSIZE);
   755           PROTECT_MALLOC_STATE (0);
   756 #elif defined (USE_PTHREAD)
   757           result = _malloc_internal_nolock (BLOCKSIZE);
   758 #else
   759           result = malloc (BLOCKSIZE);
   760 #endif
   761           if (result == NULL)
   762             {
   763               PROTECT_MALLOC_STATE (1);
   764               goto out;
   765             }
   766 
   767           /* Link all fragments but the first into the free list.  */
   768           next = (struct list *) ((char *) result + (1 << log));
   769           next->next = NULL;
   770           next->prev = &_fraghead[log];
   771           _fraghead[log].next = next;
   772 
   773           for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
   774             {
   775               next = (struct list *) ((char *) result + (i << log));
   776               next->next = _fraghead[log].next;
   777               next->prev = &_fraghead[log];
   778               next->prev->next = next;
   779               next->next->prev = next;
   780             }
   781 
   782           /* Initialize the nfree and first counters for this block.  */
   783           block = BLOCK (result);
   784           _heapinfo[block].busy.type = log;
   785           _heapinfo[block].busy.info.frag.nfree = i - 1;
   786           _heapinfo[block].busy.info.frag.first = i - 1;
   787 
   788           _chunks_free += (BLOCKSIZE >> log) - 1;
   789           _bytes_free += BLOCKSIZE - (1 << log);
   790           _bytes_used -= BLOCKSIZE - (1 << log);
   791         }
   792     }
   793   else
   794     {
   795       /* Large allocation to receive one or more blocks.
   796          Search the free list in a circle starting at the last place visited.
   797          If we loop completely around without finding a large enough
   798          space we will have to get more memory from the system.  */
   799       blocks = BLOCKIFY (size);
   800       start = block = _heapindex;
   801       while (_heapinfo[block].free.size < blocks)
   802         {
   803           block = _heapinfo[block].free.next;
   804           if (block == start)
   805             {
   806               /* Need to get more from the system.  Get a little extra.  */
   807               size_t wantblocks = blocks + __malloc_extra_blocks;
   808               block = _heapinfo[0].free.prev;
   809               lastblocks = _heapinfo[block].free.size;
   810               /* Check to see if the new core will be contiguous with the
   811                  final free block; if so we don't need to get as much.  */
   812               if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
   813                   /* We can't do this if we will have to make the heap info
   814                      table bigger to accommodate the new space.  */
   815                   block + wantblocks <= heapsize &&
   816                   get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
   817                                         ADDRESS (block + lastblocks)))
   818                 {
   819                   /* We got it contiguously.  Which block we are extending
   820                      (the `final free block' referred to above) might have
   821                      changed, if it got combined with a freed info table.  */
   822                   block = _heapinfo[0].free.prev;
   823                   _heapinfo[block].free.size += (wantblocks - lastblocks);
   824                   _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
   825                   _heaplimit += wantblocks - lastblocks;
   826                   continue;
   827                 }
   828               result = morecore_nolock (wantblocks * BLOCKSIZE);
   829               if (result == NULL)
   830                 goto out;
   831               block = BLOCK (result);
   832               /* Put the new block at the end of the free list.  */
   833               _heapinfo[block].free.size = wantblocks;
   834               _heapinfo[block].free.prev = _heapinfo[0].free.prev;
   835               _heapinfo[block].free.next = 0;
   836               _heapinfo[0].free.prev = block;
   837               _heapinfo[_heapinfo[block].free.prev].free.next = block;
   838               ++_chunks_free;
   839               /* Now loop to use some of that block for this allocation.  */
   840             }
   841         }
   842 
   843       /* At this point we have found a suitable free list entry.
   844          Figure out how to remove what we need from the list. */
   845       result = ADDRESS (block);
   846       if (_heapinfo[block].free.size > blocks)
   847         {
   848           /* The block we found has a bit left over,
   849              so relink the tail end back into the free list. */
   850           _heapinfo[block + blocks].free.size
   851             = _heapinfo[block].free.size - blocks;
   852           _heapinfo[block + blocks].free.next
   853             = _heapinfo[block].free.next;
   854           _heapinfo[block + blocks].free.prev
   855             = _heapinfo[block].free.prev;
   856           _heapinfo[_heapinfo[block].free.prev].free.next
   857             = _heapinfo[_heapinfo[block].free.next].free.prev
   858             = _heapindex = block + blocks;
   859         }
   860       else
   861         {
   862           /* The block exactly matches our requirements,
   863              so just remove it from the list. */
   864           _heapinfo[_heapinfo[block].free.next].free.prev
   865             = _heapinfo[block].free.prev;
   866           _heapinfo[_heapinfo[block].free.prev].free.next
   867             = _heapindex = _heapinfo[block].free.next;
   868           --_chunks_free;
   869         }
   870 
   871       _heapinfo[block].busy.type = -1;
   872       _heapinfo[block].busy.info.size = blocks;
   873       ++_chunks_used;
   874       _bytes_used += blocks * BLOCKSIZE;
   875       _bytes_free -= blocks * BLOCKSIZE;
   876     }
   877 
   878   PROTECT_MALLOC_STATE (1);
   879  out:
   880   return result;
   881 }
   882 
   883 void *
   884 _malloc_internal (size_t size)
   885 {
   886   void *result;
   887 
   888   LOCK ();
   889   result = _malloc_internal_nolock (size);
   890   UNLOCK ();
   891 
   892   return result;
   893 }
   894 
   895 void *
   896 malloc (size_t size)
   897 {
   898   void *(*hook) (size_t);
   899 
   900   if (!__malloc_initialized && !__malloc_initialize ())
   901     return NULL;
   902 
   903   /* Copy the value of gmalloc_hook to an automatic variable in case
   904      gmalloc_hook is modified in another thread between its
   905      NULL-check and the use.
   906 
   907      Note: Strictly speaking, this is not a right solution.  We should
   908      use mutexes to access non-read-only variables that are shared
   909      among multiple threads.  We just leave it for compatibility with
   910      glibc malloc (i.e., assignments to gmalloc_hook) for now.  */
   911   hook = gmalloc_hook;
   912   return (hook ? hook : _malloc_internal) (size);
   913 }
   914 
   915 #if !(defined (_LIBC) || defined (HYBRID_MALLOC))
   916 
   917 /* On some ANSI C systems, some libc functions call _malloc, _free
   918    and _realloc.  Make them use the GNU functions.  */
   919 
   920 extern void *_malloc (size_t);
   921 extern void _free (void *);
   922 extern void *_realloc (void *, size_t);
   923 
   924 void *
   925 _malloc (size_t size)
   926 {
   927   return malloc (size);
   928 }
   929 
   930 void
   931 _free (void *ptr)
   932 {
   933   free (ptr);
   934 }
   935 
   936 void *
   937 _realloc (void *ptr, size_t size)
   938 {
   939   return realloc (ptr, size);
   940 }
   941 
   942 #endif
   943 /* Free a block of memory allocated by `malloc'.
   944    Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   945                   Written May 1989 by Mike Haertel.
   946 
   947 This library is free software; you can redistribute it and/or
   948 modify it under the terms of the GNU General Public License as
   949 published by the Free Software Foundation; either version 2 of the
   950 License, or (at your option) any later version.
   951 
   952 This library is distributed in the hope that it will be useful,
   953 but WITHOUT ANY WARRANTY; without even the implied warranty of
   954 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   955 General Public License for more details.
   956 
   957 You should have received a copy of the GNU General Public
   958 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
   959 
   960    The author may be reached (Email) at the address mike@ai.mit.edu,
   961    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
   962 
   963 /* Debugging hook for free.  */
   964 static void (*__MALLOC_HOOK_VOLATILE gfree_hook) (void *);
   965 
   966 #ifndef HYBRID_MALLOC
   967 
   968 /* List of blocks allocated by aligned_alloc.  */
   969 struct alignlist *_aligned_blocks = NULL;
   970 #endif
   971 
   972 /* Return memory to the heap.
   973    Like `_free_internal' but don't lock mutex.  */
   974 void
   975 _free_internal_nolock (void *ptr)
   976 {
   977   int type;
   978   size_t block, blocks;
   979   register size_t i;
   980   struct list *prev, *next;
   981   void *curbrk;
   982   const size_t lesscore_threshold
   983     /* Threshold of free space at which we will return some to the system.  */
   984     = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
   985 
   986   register struct alignlist *l;
   987 
   988   if (ptr == NULL)
   989     return;
   990 
   991   PROTECT_MALLOC_STATE (0);
   992 
   993   LOCK_ALIGNED_BLOCKS ();
   994   for (l = _aligned_blocks; l != NULL; l = l->next)
   995     if (l->aligned == ptr)
   996       {
   997         l->aligned = NULL;      /* Mark the slot in the list as free.  */
   998         ptr = l->exact;
   999         break;
  1000       }
  1001   UNLOCK_ALIGNED_BLOCKS ();
  1002 
  1003   block = BLOCK (ptr);
  1004 
  1005   type = _heapinfo[block].busy.type;
  1006   switch (type)
  1007     {
  1008     case -1:
  1009       /* Get as many statistics as early as we can.  */
  1010       --_chunks_used;
  1011       _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
  1012       _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
  1013 
  1014       /* Find the free cluster previous to this one in the free list.
  1015          Start searching at the last block referenced; this may benefit
  1016          programs with locality of allocation.  */
  1017       i = _heapindex;
  1018       if (i > block)
  1019         while (i > block)
  1020           i = _heapinfo[i].free.prev;
  1021       else
  1022         {
  1023           do
  1024             i = _heapinfo[i].free.next;
  1025           while (i > 0 && i < block);
  1026           i = _heapinfo[i].free.prev;
  1027         }
  1028 
  1029       /* Determine how to link this block into the free list.  */
  1030       if (block == i + _heapinfo[i].free.size)
  1031         {
  1032           /* Coalesce this block with its predecessor.  */
  1033           _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
  1034           block = i;
  1035         }
  1036       else
  1037         {
  1038           /* Really link this block back into the free list.  */
  1039           _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
  1040           _heapinfo[block].free.next = _heapinfo[i].free.next;
  1041           _heapinfo[block].free.prev = i;
  1042           _heapinfo[i].free.next = block;
  1043           _heapinfo[_heapinfo[block].free.next].free.prev = block;
  1044           ++_chunks_free;
  1045         }
  1046 
  1047       /* Now that the block is linked in, see if we can coalesce it
  1048          with its successor (by deleting its successor from the list
  1049          and adding in its size).  */
  1050       if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
  1051         {
  1052           _heapinfo[block].free.size
  1053             += _heapinfo[_heapinfo[block].free.next].free.size;
  1054           _heapinfo[block].free.next
  1055             = _heapinfo[_heapinfo[block].free.next].free.next;
  1056           _heapinfo[_heapinfo[block].free.next].free.prev = block;
  1057           --_chunks_free;
  1058         }
  1059 
  1060       /* How many trailing free blocks are there now?  */
  1061       blocks = _heapinfo[block].free.size;
  1062 
  1063       /* Where is the current end of accessible core?  */
  1064       curbrk = (*__morecore) (0);
  1065 
  1066       if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
  1067         {
  1068           /* The end of the malloc heap is at the end of accessible core.
  1069              It's possible that moving _heapinfo will allow us to
  1070              return some space to the system.  */
  1071 
  1072           size_t info_block = BLOCK (_heapinfo);
  1073           size_t info_blocks = _heapinfo[info_block].busy.info.size;
  1074           size_t prev_block = _heapinfo[block].free.prev;
  1075           size_t prev_blocks = _heapinfo[prev_block].free.size;
  1076           size_t next_block = _heapinfo[block].free.next;
  1077           size_t next_blocks = _heapinfo[next_block].free.size;
  1078 
  1079           if (/* Win if this block being freed is last in core, the info table
  1080                  is just before it, the previous free block is just before the
  1081                  info table, and the two free blocks together form a useful
  1082                  amount to return to the system.  */
  1083               (block + blocks == _heaplimit &&
  1084                info_block + info_blocks == block &&
  1085                prev_block != 0 && prev_block + prev_blocks == info_block &&
  1086                blocks + prev_blocks >= lesscore_threshold) ||
  1087               /* Nope, not the case.  We can also win if this block being
  1088                  freed is just before the info table, and the table extends
  1089                  to the end of core or is followed only by a free block,
  1090                  and the total free space is worth returning to the system.  */
  1091               (block + blocks == info_block &&
  1092                ((info_block + info_blocks == _heaplimit &&
  1093                  blocks >= lesscore_threshold) ||
  1094                 (info_block + info_blocks == next_block &&
  1095                  next_block + next_blocks == _heaplimit &&
  1096                  blocks + next_blocks >= lesscore_threshold)))
  1097               )
  1098             {
  1099               malloc_info *newinfo;
  1100               size_t oldlimit = _heaplimit;
  1101 
  1102               /* Free the old info table, clearing _heaplimit to avoid
  1103                  recursion into this code.  We don't want to return the
  1104                  table's blocks to the system before we have copied them to
  1105                  the new location.  */
  1106               _heaplimit = 0;
  1107               _free_internal_nolock (_heapinfo);
  1108               _heaplimit = oldlimit;
  1109 
  1110               /* Tell malloc to search from the beginning of the heap for
  1111                  free blocks, so it doesn't reuse the ones just freed.  */
  1112               _heapindex = 0;
  1113 
  1114               /* Allocate new space for the info table and move its data.  */
  1115               newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
  1116               PROTECT_MALLOC_STATE (0);
  1117               memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
  1118               _heapinfo = newinfo;
  1119 
  1120               /* We should now have coalesced the free block with the
  1121                  blocks freed from the old info table.  Examine the entire
  1122                  trailing free block to decide below whether to return some
  1123                  to the system.  */
  1124               block = _heapinfo[0].free.prev;
  1125               blocks = _heapinfo[block].free.size;
  1126             }
  1127 
  1128           /* Now see if we can return stuff to the system.  */
  1129           if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
  1130             {
  1131               register size_t bytes = blocks * BLOCKSIZE;
  1132               _heaplimit -= blocks;
  1133               (*__morecore) (-bytes);
  1134               _heapinfo[_heapinfo[block].free.prev].free.next
  1135                 = _heapinfo[block].free.next;
  1136               _heapinfo[_heapinfo[block].free.next].free.prev
  1137                 = _heapinfo[block].free.prev;
  1138               block = _heapinfo[block].free.prev;
  1139               --_chunks_free;
  1140               _bytes_free -= bytes;
  1141             }
  1142         }
  1143 
  1144       /* Set the next search to begin at this block.  */
  1145       _heapindex = block;
  1146       break;
  1147 
  1148     default:
  1149       /* Do some of the statistics.  */
  1150       --_chunks_used;
  1151       _bytes_used -= 1 << type;
  1152       ++_chunks_free;
  1153       _bytes_free += 1 << type;
  1154 
  1155       /* Get the address of the first free fragment in this block.  */
  1156       prev = (struct list *) ((char *) ADDRESS (block) +
  1157                               (_heapinfo[block].busy.info.frag.first << type));
  1158 
  1159       if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
  1160         {
  1161           /* If all fragments of this block are free, remove them
  1162              from the fragment list and free the whole block.  */
  1163           next = prev;
  1164           for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
  1165             next = next->next;
  1166           prev->prev->next = next;
  1167           if (next != NULL)
  1168             next->prev = prev->prev;
  1169           _heapinfo[block].busy.type = -1;
  1170           _heapinfo[block].busy.info.size = 1;
  1171 
  1172           /* Keep the statistics accurate.  */
  1173           ++_chunks_used;
  1174           _bytes_used += BLOCKSIZE;
  1175           _chunks_free -= BLOCKSIZE >> type;
  1176           _bytes_free -= BLOCKSIZE;
  1177 
  1178 #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
  1179           _free_internal_nolock (ADDRESS (block));
  1180 #else
  1181           free (ADDRESS (block));
  1182 #endif
  1183         }
  1184       else if (_heapinfo[block].busy.info.frag.nfree != 0)
  1185         {
  1186           /* If some fragments of this block are free, link this
  1187              fragment into the fragment list after the first free
  1188              fragment of this block. */
  1189           next = ptr;
  1190           next->next = prev->next;
  1191           next->prev = prev;
  1192           prev->next = next;
  1193           if (next->next != NULL)
  1194             next->next->prev = next;
  1195           ++_heapinfo[block].busy.info.frag.nfree;
  1196         }
  1197       else
  1198         {
  1199           /* No fragments of this block are free, so link this
  1200              fragment into the fragment list and announce that
  1201              it is the first free fragment of this block. */
  1202           prev = ptr;
  1203           _heapinfo[block].busy.info.frag.nfree = 1;
  1204           _heapinfo[block].busy.info.frag.first =
  1205             (uintptr_t) ptr % BLOCKSIZE >> type;
  1206           prev->next = _fraghead[type].next;
  1207           prev->prev = &_fraghead[type];
  1208           prev->prev->next = prev;
  1209           if (prev->next != NULL)
  1210             prev->next->prev = prev;
  1211         }
  1212       break;
  1213     }
  1214 
  1215   PROTECT_MALLOC_STATE (1);
  1216 }
  1217 
  1218 /* Return memory to the heap.
  1219    Like 'free' but don't call a hook if there is one.  */
  1220 void
  1221 _free_internal (void *ptr)
  1222 {
  1223   LOCK ();
  1224   _free_internal_nolock (ptr);
  1225   UNLOCK ();
  1226 }
  1227 
  1228 /* Return memory to the heap.  */
  1229 
  1230 void
  1231 free (void *ptr)
  1232 {
  1233   void (*hook) (void *) = gfree_hook;
  1234 
  1235   if (hook != NULL)
  1236     (*hook) (ptr);
  1237   else
  1238     _free_internal (ptr);
  1239 }
  1240 
  1241 #ifndef HYBRID_MALLOC
  1242 /* Define the `cfree' alias for `free'.  */
  1243 #ifdef weak_alias
  1244 weak_alias (free, cfree)
  1245 #else
  1246 void
  1247 cfree (void *ptr)
  1248 {
  1249   free (ptr);
  1250 }
  1251 #endif
  1252 #endif
  1253 /* Change the size of a block allocated by `malloc'.
  1254    Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
  1255                      Written May 1989 by Mike Haertel.
  1256 
  1257 This library is free software; you can redistribute it and/or
  1258 modify it under the terms of the GNU General Public License as
  1259 published by the Free Software Foundation; either version 2 of the
  1260 License, or (at your option) any later version.
  1261 
  1262 This library is distributed in the hope that it will be useful,
  1263 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1264 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1265 General Public License for more details.
  1266 
  1267 You should have received a copy of the GNU General Public
  1268 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1269 
  1270    The author may be reached (Email) at the address mike@ai.mit.edu,
  1271    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1272 
  1273 #ifndef min
  1274 #define min(a, b) ((a) < (b) ? (a) : (b))
  1275 #endif
  1276 
  1277 /* Debugging hook for realloc.  */
  1278 static void *(*grealloc_hook) (void *, size_t);
  1279 
  1280 /* Resize the given region to the new size, returning a pointer
  1281    to the (possibly moved) region.  This is optimized for speed;
  1282    some benchmarks seem to indicate that greater compactness is
  1283    achieved by unconditionally allocating and copying to a
  1284    new region.  This module has incestuous knowledge of the
  1285    internals of both free and malloc. */
  1286 void *
  1287 _realloc_internal_nolock (void *ptr, size_t size)
  1288 {
  1289   void *result;
  1290   int type;
  1291   size_t block, blocks, oldlimit;
  1292 
  1293   if (size == 0)
  1294     {
  1295       _free_internal_nolock (ptr);
  1296       return _malloc_internal_nolock (0);
  1297     }
  1298   else if (ptr == NULL)
  1299     return _malloc_internal_nolock (size);
  1300 
  1301   block = BLOCK (ptr);
  1302 
  1303   PROTECT_MALLOC_STATE (0);
  1304 
  1305   type = _heapinfo[block].busy.type;
  1306   switch (type)
  1307     {
  1308     case -1:
  1309       /* Maybe reallocate a large block to a small fragment.  */
  1310       if (size <= BLOCKSIZE / 2)
  1311         {
  1312           result = _malloc_internal_nolock (size);
  1313           if (result != NULL)
  1314             {
  1315               memcpy (result, ptr, size);
  1316               _free_internal_nolock (ptr);
  1317               goto out;
  1318             }
  1319         }
  1320 
  1321       /* The new size is a large allocation as well;
  1322          see if we can hold it in place. */
  1323       blocks = BLOCKIFY (size);
  1324       if (blocks < _heapinfo[block].busy.info.size)
  1325         {
  1326           /* The new size is smaller; return
  1327              excess memory to the free list. */
  1328           _heapinfo[block + blocks].busy.type = -1;
  1329           _heapinfo[block + blocks].busy.info.size
  1330             = _heapinfo[block].busy.info.size - blocks;
  1331           _heapinfo[block].busy.info.size = blocks;
  1332           /* We have just created a new chunk by splitting a chunk in two.
  1333              Now we will free this chunk; increment the statistics counter
  1334              so it doesn't become wrong when _free_internal decrements it.  */
  1335           ++_chunks_used;
  1336           _free_internal_nolock (ADDRESS (block + blocks));
  1337           result = ptr;
  1338         }
  1339       else if (blocks == _heapinfo[block].busy.info.size)
  1340         /* No size change necessary.  */
  1341         result = ptr;
  1342       else
  1343         {
  1344           /* Won't fit, so allocate a new region that will.
  1345              Free the old region first in case there is sufficient
  1346              adjacent free space to grow without moving. */
  1347           blocks = _heapinfo[block].busy.info.size;
  1348           /* Prevent free from actually returning memory to the system.  */
  1349           oldlimit = _heaplimit;
  1350           _heaplimit = 0;
  1351           _free_internal_nolock (ptr);
  1352           result = _malloc_internal_nolock (size);
  1353           PROTECT_MALLOC_STATE (0);
  1354           if (_heaplimit == 0)
  1355             _heaplimit = oldlimit;
  1356           if (result == NULL)
  1357             {
  1358               /* Now we're really in trouble.  We have to unfree
  1359                  the thing we just freed.  Unfortunately it might
  1360                  have been coalesced with its neighbors.  */
  1361               if (_heapindex == block)
  1362                 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
  1363               else
  1364                 {
  1365                   void *previous
  1366                     = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
  1367                   (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
  1368                   _free_internal_nolock (previous);
  1369                 }
  1370               goto out;
  1371             }
  1372           if (ptr != result)
  1373             memmove (result, ptr, blocks * BLOCKSIZE);
  1374         }
  1375       break;
  1376 
  1377     default:
  1378       /* Old size is a fragment; type is logarithm
  1379          to base two of the fragment size.  */
  1380       if (size > (size_t) (1 << (type - 1)) &&
  1381           size <= (size_t) (1 << type))
  1382         /* The new size is the same kind of fragment.  */
  1383         result = ptr;
  1384       else
  1385         {
  1386           /* The new size is different; allocate a new space,
  1387              and copy the lesser of the new size and the old. */
  1388           result = _malloc_internal_nolock (size);
  1389           if (result == NULL)
  1390             goto out;
  1391           memcpy (result, ptr, min (size, (size_t) 1 << type));
  1392           _free_internal_nolock (ptr);
  1393         }
  1394       break;
  1395     }
  1396 
  1397   PROTECT_MALLOC_STATE (1);
  1398  out:
  1399   return result;
  1400 }
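
       /* Worked example of the fragment case above (illustrative, not part
          of the original file): for TYPE 5 the old block is a 32-byte
          fragment, so any new SIZE from 17 through 32 is returned in
          place, while SIZE 33 forces a copy into a fresh 64-byte
          fragment.  */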
  1401 
  1402 void *
  1403 _realloc_internal (void *ptr, size_t size)
  1404 {
  1405   void *result;
  1406 
  1407   LOCK ();
  1408   result = _realloc_internal_nolock (ptr, size);
  1409   UNLOCK ();
  1410 
  1411   return result;
  1412 }
  1413 
  1414 void *
  1415 realloc (void *ptr, size_t size)
  1416 {
  1417   void *(*hook) (void *, size_t);
  1418 
  1419   if (!__malloc_initialized && !__malloc_initialize ())
  1420     return NULL;
  1421 
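         /* Copy the hook into a local so the null test and the call use
            the same function even if the hook is changed concurrently.  */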
  1422   hook = grealloc_hook;
  1423   return (hook ? hook : _realloc_internal) (ptr, size);
  1424 }
  1425 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
  1426 
  1427 This library is free software; you can redistribute it and/or
  1428 modify it under the terms of the GNU General Public License as
  1429 published by the Free Software Foundation; either version 2 of the
  1430 License, or (at your option) any later version.
  1431 
  1432 This library is distributed in the hope that it will be useful,
  1433 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1434 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1435 General Public License for more details.
  1436 
  1437 You should have received a copy of the GNU General Public
  1438 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1439 
  1440    The author may be reached (Email) at the address mike@ai.mit.edu,
  1441    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1442 
  1443 /* Allocate an array of NMEMB elements each SIZE bytes long.
  1444    The entire array is initialized to zeros.  */
  1445 void *
  1446 calloc (size_t nmemb, size_t size)
  1447 {
  1448   void *result;
  1449   size_t bytes = nmemb * size;
  1450 
  1451   if (size != 0 && bytes / size != nmemb)
  1452     {
  1453       errno = ENOMEM;
  1454       return NULL;
  1455     }
  1456 
  1457   result = malloc (bytes);
  1458   if (result)
  1459     return memset (result, 0, bytes);
  1460   return result;
  1461 }
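
       /* Worked example of the overflow guard above (illustrative): with a
          16-bit size_t, calloc (300, 300) computes BYTES as
          300 * 300 mod 65536 = 24464, and 24464 / 300 = 81 != 300, so the
          wraparound is detected and ENOMEM is returned.  */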
  1462 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
  1463 This file is part of the GNU C Library.
  1464 
  1465 The GNU C Library is free software; you can redistribute it and/or modify
  1466 it under the terms of the GNU General Public License as published by
  1467 the Free Software Foundation; either version 2, or (at your option)
  1468 any later version.
  1469 
  1470 The GNU C Library is distributed in the hope that it will be useful,
  1471 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1472 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  1473 GNU General Public License for more details.
  1474 
  1475 You should have received a copy of the GNU General Public License
  1476 along with the GNU C Library.  If not, see <https://www.gnu.org/licenses/>.  */
  1477 
  1478 /* uClibc defines __GNU_LIBRARY__, but it is not completely
  1479    compatible.  */
  1480 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
  1481 #define __sbrk  sbrk
  1482 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
  1483 /* On foreign operating systems, potentially hostile include files make
  1484    it unsafe to declare __sbrk ourselves and cast its result; with the
       GNU C Library the declaration below is known to be safe.  */
  1485 
  1486 extern void *__sbrk (ptrdiff_t increment);
  1487 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
  1488 
  1489 /* Allocate INCREMENT more bytes of data space,
  1490    and return the start of data space, or NULL on errors.
  1491    If INCREMENT is negative, shrink data space.  */
  1492 static void *
  1493 gdefault_morecore (ptrdiff_t increment)
  1494 {
  1495 #ifdef HYBRID_MALLOC
  1496   if (!definitely_will_not_unexec_p ())
  1497     {
  1498       return bss_sbrk (increment);
  1499     }
  1500 #endif
  1501 #ifdef HAVE_SBRK
  1502   void *result = (void *) __sbrk (increment);
  1503   if (result != (void *) -1)
  1504     return result;
  1505 #endif
  1506   return NULL;
  1507 }
  1508 
  1509 void *(*__morecore) (ptrdiff_t) = gdefault_morecore;
  1510 
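       /* A minimal sketch, assuming a build where sbrk is unavailable, of
          how a replacement core allocator could be plugged in.  __morecore
          is gmalloc's only source of memory, so pointing it at a static
          arena such as the hypothetical one below (names and size invented
          here) would suffice.  Like sbrk, it returns the old break, or
          NULL on exhaustion.  */
       #if 0
       static char arena[16 * 1024 * 1024];
       static size_t arena_used;

       static void *
       arena_morecore (ptrdiff_t increment)
       {
         /* Reject growth past the end of the arena, or shrinkage below
            its start.  */
         if (increment >= 0
             ? (size_t) increment > sizeof arena - arena_used
             : arena_used < (size_t) -increment)
           return NULL;
         void *result = arena + arena_used;
         arena_used += increment;
         return result;
       }
       /* Install before the first allocation: __morecore = arena_morecore;  */
       #endif
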
  1511 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
  1512 
  1513 This library is free software; you can redistribute it and/or
  1514 modify it under the terms of the GNU General Public License as
  1515 published by the Free Software Foundation; either version 2 of the
  1516 License, or (at your option) any later version.
  1517 
  1518 This library is distributed in the hope that it will be useful,
  1519 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1520 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1521 General Public License for more details.
  1522 
  1523 You should have received a copy of the GNU General Public
  1524 License along with this library.  If not, see <https://www.gnu.org/licenses/>.  */
  1525 
  1526 void *
  1527 aligned_alloc (size_t alignment, size_t size)
  1528 {
  1529   void *result;
  1530   size_t adj, lastadj;
  1531 
  1532   /* Allocate a block with enough extra space to pad the block with up to
  1533      (ALIGNMENT - 1) bytes if necessary.  In size_t arithmetic, - SIZE is
          SIZE_MAX - SIZE + 1, so the test below rejects a zero SIZE as well
          as any SIZE for which SIZE + ALIGNMENT - 1 would wrap around.  */
  1534   if (- size < alignment)
  1535     {
  1536       errno = ENOMEM;
  1537       return NULL;
  1538     }
  1539   result = malloc (size + alignment - 1);
  1540   if (result == NULL)
  1541     return NULL;
  1542 
  1543   /* Figure out how much we will need to pad this particular block
  1544      to achieve the required alignment.  */
  1545   adj = alignment - (uintptr_t) result % alignment;
  1546   if (adj == alignment)
  1547     adj = 0;
  1548 
  1549   if (adj != alignment - 1)
  1550     {
  1551       do
  1552         {
  1553           /* Reallocate the block with only as much excess as it
  1554              needs.  */
  1555           free (result);
  1556           result = malloc (size + adj);
  1557           if (result == NULL)   /* Only if the freed space was snatched meanwhile.  */
  1558             return NULL;
  1559 
  1560           lastadj = adj;
  1561           adj = alignment - (uintptr_t) result % alignment;
  1562           if (adj == alignment)
  1563             adj = 0;
  1564           /* It's conceivable we might have been so unlucky as to get
  1565              a different block with weaker alignment.  If so, this
  1566              block is too short to contain SIZE after alignment
  1567              correction.  So we must try again and get another block,
  1568              slightly larger.  */
  1569         } while (adj > lastadj);
  1570     }
  1571 
  1572   if (adj != 0)
  1573     {
  1574       /* Record this block in the list of aligned blocks, so that `free'
  1575          can identify the pointer it is passed, which will be in the middle
  1576          of an allocated block.  */
  1577 
  1578       struct alignlist *l;
  1579       LOCK_ALIGNED_BLOCKS ();
  1580       for (l = _aligned_blocks; l != NULL; l = l->next)
  1581         if (l->aligned == NULL)
  1582           /* This slot is free.  Use it.  */
  1583           break;
  1584       if (l == NULL)
  1585         {
  1586           l = malloc (sizeof *l);
  1587           if (l != NULL)
  1588             {
  1589               l->next = _aligned_blocks;
  1590               _aligned_blocks = l;
  1591             }
  1592         }
  1593       if (l != NULL)
  1594         {
  1595           l->exact = result;
  1596           result = l->aligned = (char *) result + adj;
  1597         }
  1598       UNLOCK_ALIGNED_BLOCKS ();
  1599       if (l == NULL)
  1600         {
  1601           free (result);
  1602           result = NULL;
  1603         }
  1604     }
  1605 
  1606   return result;
  1607 }
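
       /* Worked example of the padding computation above (illustrative):
          with ALIGNMENT 16 and a block at 0x1003, ADJ = 16 - 3 = 13, the
          caller receives 0x1003 + 13 = 0x1010, and the exact/aligned pair
          is recorded in _aligned_blocks so that free can recover 0x1003.
          A block already at 0x1010 yields ADJ = 16, reset to 0, and no
          list entry is needed.  */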
  1608 
  1609 /* Note that memalign and posix_memalign are not used in Emacs.  */
  1610 #ifndef HYBRID_MALLOC
  1611 /* An obsolete alias for aligned_alloc, for any old libraries that use
  1612    this alias.  */
  1613 
  1614 void *
  1615 memalign (size_t alignment, size_t size)
  1616 {
  1617   return aligned_alloc (alignment, size);
  1618 }
  1619 
  1620 /* If HYBRID_MALLOC is defined, we may want to use the system
  1621    posix_memalign below.  */
  1622 int
  1623 posix_memalign (void **memptr, size_t alignment, size_t size)
  1624 {
  1625   void *mem;
  1626 
  1627   if (alignment == 0
  1628       || alignment % sizeof (void *) != 0
  1629       || (alignment & (alignment - 1)) != 0)
  1630     return EINVAL;
  1631 
  1632   mem = aligned_alloc (alignment, size);
  1633   if (mem == NULL)
  1634     return ENOMEM;
  1635 
  1636   *memptr = mem;
  1637 
  1638   return 0;
  1639 }
  1640 #endif
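
       /* Hypothetical usage sketch (not part of this file): unlike malloc,
          posix_memalign reports failure through its return value rather
          than through errno.  */
       #if 0
       void *buf;
       if (posix_memalign (&buf, 64, 1024) == 0)
         {
           /* BUF is 64-byte aligned; release it with free.  */
           free (buf);
         }
       #endif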
  1641 
  1642 /* Allocate memory on a page boundary.
  1643    Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
  1644 
  1645 This library is free software; you can redistribute it and/or
  1646 modify it under the terms of the GNU General Public License as
  1647 published by the Free Software Foundation; either version 2 of the
  1648 License, or (at your option) any later version.
  1649 
  1650 This library is distributed in the hope that it will be useful,
  1651 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1652 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1653 General Public License for more details.
  1654 
  1655 You should have received a copy of the GNU General Public
  1656 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1657 
  1658    The author may be reached (Email) at the address mike@ai.mit.edu,
  1659    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1660 
  1661 #ifndef HYBRID_MALLOC
  1662 
  1663 # ifndef HAVE_MALLOC_H
  1664 /* Allocate SIZE bytes on a page boundary.  */
  1665 extern void *valloc (size_t);
  1666 # endif
  1667 
  1668 # if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
  1669 #  include "getpagesize.h"
  1670 # elif !defined getpagesize
  1671 extern int getpagesize (void);
  1672 # endif
  1673 
  1674 static size_t pagesize;
  1675 
  1676 void *
  1677 valloc (size_t size)
  1678 {
  1679   if (pagesize == 0)
  1680     pagesize = getpagesize ();
  1681 
  1682   return aligned_alloc (pagesize, size);
  1683 }
  1684 #endif /* HYBRID_MALLOC */
  1685 
  1686 #undef malloc
  1687 #undef realloc
  1688 #undef calloc
  1689 #undef aligned_alloc
  1690 #undef free
  1691 
  1692 #ifdef HYBRID_MALLOC
  1693 
  1694 /* Assuming PTR was allocated via the hybrid malloc, return true if
  1695    PTR was allocated via gmalloc, not the system malloc.  Also, return
  1696    true if _heaplimit is zero; this can happen temporarily when
  1697    gmalloc calls itself for internal use, and in that case PTR is
  1698    already known to be allocated via gmalloc.  */
  1699 
  1700 static bool
  1701 allocated_via_gmalloc (void *ptr)
  1702 {
  1703   if (!__malloc_initialized)
  1704     return false;
  1705   size_t block = BLOCK (ptr);
  1706   size_t blockmax = _heaplimit - 1;
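         /* When _heaplimit is zero, the subtraction above wraps to
            SIZE_MAX, so the range check below accepts any block, as
            promised in the comment before this function.  */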
  1707   return block <= blockmax && _heapinfo[block].busy.type != 0;
  1708 }
  1709 
  1710 /* See the comments near the beginning of this file for explanations
  1711    of the following functions. */
  1712 
  1713 void *
  1714 hybrid_malloc (size_t size)
  1715 {
  1716   if (definitely_will_not_unexec_p ())
  1717     return malloc (size);
  1718   return gmalloc (size);
  1719 }
  1720 
  1721 void *
  1722 hybrid_calloc (size_t nmemb, size_t size)
  1723 {
  1724   if (definitely_will_not_unexec_p ())
  1725     return calloc (nmemb, size);
  1726   return gcalloc (nmemb, size);
  1727 }
  1728 
  1729 static void
  1730 hybrid_free_1 (void *ptr)
  1731 {
  1732   if (allocated_via_gmalloc (ptr))
  1733     gfree (ptr);
  1734   else
  1735     free (ptr);
  1736 }
  1737 
  1738 void
  1739 hybrid_free (void *ptr)
  1740 {
  1741   /* Stolen from Gnulib, to make sure we preserve errno.  The odd
          array-indexed restore below is Gnulib's workaround for a GCC
          optimization that can otherwise discard the save and restore.  */
  1742 #if defined __GNUC__ && !defined __clang__
  1743   int err[2];
  1744   err[0] = errno;
  1745   err[1] = errno;
  1746   errno = 0;
  1747   hybrid_free_1 (ptr);
  1748   errno = err[errno == 0];
  1749 #else
  1750   int err = errno;
  1751   hybrid_free_1 (ptr);
  1752   errno = err;
  1753 #endif
  1754 }
  1755 
  1756 #if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
  1757 void *
  1758 hybrid_aligned_alloc (size_t alignment, size_t size)
  1759 {
  1760   if (!definitely_will_not_unexec_p ())
  1761     return galigned_alloc (alignment, size);
  1762   /* The following is copied from alloc.c */
  1763 #ifdef HAVE_ALIGNED_ALLOC
  1764   return aligned_alloc (alignment, size);
  1765 #else  /* HAVE_POSIX_MEMALIGN */
  1766   void *p;
  1767   return posix_memalign (&p, alignment, size) == 0 ? p : 0;
  1768 #endif
  1769 }
  1770 #endif
  1771 
  1772 void *
  1773 hybrid_realloc (void *ptr, size_t size)
  1774 {
  1775   void *result;
  1776   int type;
  1777   size_t block, oldsize;
  1778 
  1779   if (!ptr)
  1780     return hybrid_malloc (size);
  1781   if (!allocated_via_gmalloc (ptr))
  1782     return realloc (ptr, size);
  1783   if (!definitely_will_not_unexec_p ())
  1784     return grealloc (ptr, size);
  1785 
  1786   /* The dumped emacs is trying to realloc storage allocated before
  1787      dumping via gmalloc.  Allocate new space and copy the data.  Do
  1788      not bother with gfree (ptr), as that would just waste time.  */
  1789   block = BLOCK (ptr);
  1790   type = _heapinfo[block].busy.type;
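         /* A negative TYPE marks a multi-block allocation whose size is
            counted in blocks; otherwise TYPE is the base-2 logarithm of
            the fragment size.  */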
  1791   oldsize =
  1792     type < 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
  1793     : (size_t) 1 << type;
  1794   result = malloc (size);
  1795   if (result)
  1796     return memcpy (result, ptr, min (oldsize, size));
  1797   return result;
  1798 }
  1799 
  1800 #else   /* ! HYBRID_MALLOC */
  1801 
  1802 void *
  1803 malloc (size_t size)
  1804 {
  1805   return gmalloc (size);
  1806 }
  1807 
  1808 void *
  1809 calloc (size_t nmemb, size_t size)
  1810 {
  1811   return gcalloc (nmemb, size);
  1812 }
  1813 
  1814 void
  1815 free (void *ptr)
  1816 {
  1817   gfree (ptr);
  1818 }
  1819 
  1820 void *
  1821 aligned_alloc (size_t alignment, size_t size)
  1822 {
  1823   return galigned_alloc (alignment, size);
  1824 }
  1825 
  1826 void *
  1827 realloc (void *ptr, size_t size)
  1828 {
  1829   return grealloc (ptr, size);
  1830 }
  1831 
  1832 #endif  /* HYBRID_MALLOC */
  1833 
  1834 #ifdef GC_MCHECK
  1835 
  1836 /* Standard debugging hooks for `malloc'.
  1837    Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
  1838    Written May 1989 by Mike Haertel.
  1839 
  1840 This library is free software; you can redistribute it and/or
  1841 modify it under the terms of the GNU General Public License as
  1842 published by the Free Software Foundation; either version 2 of the
  1843 License, or (at your option) any later version.
  1844 
  1845 This library is distributed in the hope that it will be useful,
  1846 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1847 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1848 General Public License for more details.
  1849 
  1850 You should have received a copy of the GNU General Public
  1851 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1852 
  1853    The author may be reached (Email) at the address mike@ai.mit.edu,
  1854    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1855 
  1856 #include <stdio.h>
  1857 
  1858 /* Old hook values.  */
  1859 static void (*old_free_hook) (void *ptr);
  1860 static void *(*old_malloc_hook) (size_t size);
  1861 static void *(*old_realloc_hook) (void *ptr, size_t size);
  1862 
  1863 /* Function to call when something awful happens.  */
  1864 static void (*abortfunc) (enum mcheck_status);
  1865 
  1866 /* Arbitrary magical numbers.  */
  1867 #define MAGICWORD       (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
  1868 #define MAGICFREE       (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
  1869 #define MAGICBYTE       ((char) 0xd7)
  1870 #define MALLOCFLOOD     ((char) 0x93)
  1871 #define FREEFLOOD       ((char) 0x95)
  1872 
  1873 struct hdr
  1874   {
  1875     size_t size;        /* Exact size requested by user.  */
  1876     size_t magic;       /* Magic number to check header integrity.  */
  1877   };
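
       /* Layout of a checked allocation (sketch):
            [struct hdr][SIZE bytes of user data][MAGICBYTE]
          hdr.magic distinguishes live blocks (MAGICWORD) from freed ones
          (MAGICFREE); the single trailing byte catches writes just past
          the end of the block.  */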
  1878 
  1879 static enum mcheck_status
  1880 checkhdr (const struct hdr *hdr)
  1881 {
  1882   enum mcheck_status status;
  1883   switch (hdr->magic)
  1884     {
  1885     default:
  1886       status = MCHECK_HEAD;
  1887       break;
  1888     case MAGICFREE:
  1889       status = MCHECK_FREE;
  1890       break;
  1891     case MAGICWORD:
  1892       if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
  1893         status = MCHECK_TAIL;
  1894       else
  1895         status = MCHECK_OK;
  1896       break;
  1897     }
  1898   if (status != MCHECK_OK)
  1899     (*abortfunc) (status);
  1900   return status;
  1901 }
  1902 
  1903 static void
  1904 freehook (void *ptr)
  1905 {
  1906   struct hdr *hdr;
  1907 
  1908   if (ptr)
  1909     {
  1910       struct alignlist *l;
  1911 
  1912       /* If the block was allocated by aligned_alloc, its real pointer
  1913          to free is recorded in _aligned_blocks; find that.  */
  1914       PROTECT_MALLOC_STATE (0);
  1915       LOCK_ALIGNED_BLOCKS ();
  1916       for (l = _aligned_blocks; l != NULL; l = l->next)
  1917         if (l->aligned == ptr)
  1918           {
  1919             l->aligned = NULL;  /* Mark the slot in the list as free.  */
  1920             ptr = l->exact;
  1921             break;
  1922           }
  1923       UNLOCK_ALIGNED_BLOCKS ();
  1924       PROTECT_MALLOC_STATE (1);
  1925 
  1926       hdr = ((struct hdr *) ptr) - 1;
  1927       checkhdr (hdr);
  1928       hdr->magic = MAGICFREE;
  1929       memset (ptr, FREEFLOOD, hdr->size);
  1930     }
  1931   else
  1932     hdr = NULL;
  1933 
  1934   gfree_hook = old_free_hook;
  1935   free (hdr);
  1936   gfree_hook = freehook;
  1937 }
  1938 
  1939 static void *
  1940 mallochook (size_t size)
  1941 {
  1942   struct hdr *hdr;
  1943 
  1944   gmalloc_hook = old_malloc_hook;
  1945   hdr = malloc (sizeof *hdr + size + 1);
  1946   gmalloc_hook = mallochook;
  1947   if (hdr == NULL)
  1948     return NULL;
  1949 
  1950   hdr->size = size;
  1951   hdr->magic = MAGICWORD;
  1952   ((char *) &hdr[1])[size] = MAGICBYTE;
  1953   return memset (hdr + 1, MALLOCFLOOD, size);
  1954 }
  1955 
  1956 static void *
  1957 reallochook (void *ptr, size_t size)
  1958 {
  1959   struct hdr *hdr = NULL;
  1960   size_t osize = 0;
  1961 
  1962   if (ptr)
  1963     {
  1964       hdr = ((struct hdr *) ptr) - 1;
  1965       osize = hdr->size;
  1966 
  1967       checkhdr (hdr);
  1968       if (size < osize)
  1969         memset ((char *) ptr + size, FREEFLOOD, osize - size);
  1970     }
  1971 
  1972   gfree_hook = old_free_hook;
  1973   gmalloc_hook = old_malloc_hook;
  1974   grealloc_hook = old_realloc_hook;
  1975   hdr = realloc (hdr, sizeof *hdr + size + 1);
  1976   gfree_hook = freehook;
  1977   gmalloc_hook = mallochook;
  1978   grealloc_hook = reallochook;
  1979   if (hdr == NULL)
  1980     return NULL;
  1981 
  1982   hdr->size = size;
  1983   hdr->magic = MAGICWORD;
  1984   ((char *) &hdr[1])[size] = MAGICBYTE;
  1985   if (size > osize)
  1986     memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  1987   return hdr + 1;
  1988 }
  1989 
  1990 static void
  1991 mabort (enum mcheck_status status)
  1992 {
  1993   const char *msg;
  1994   switch (status)
  1995     {
  1996     case MCHECK_OK:
  1997       msg = "memory is consistent, library is buggy";
  1998       break;
  1999     case MCHECK_HEAD:
  2000       msg = "memory clobbered before allocated block";
  2001       break;
  2002     case MCHECK_TAIL:
  2003       msg = "memory clobbered past end of allocated block";
  2004       break;
  2005     case MCHECK_FREE:
  2006       msg = "block freed twice";
  2007       break;
  2008     default:
  2009       msg = "bogus mcheck_status, library is buggy";
  2010       break;
  2011     }
  2012 #ifdef __GNU_LIBRARY__
  2013   __libc_fatal (msg);
  2014 #else
  2015   fprintf (stderr, "mcheck: %s\n", msg);
  2016   emacs_abort ();
  2017 #endif
  2018 }
  2019 
  2020 static int mcheck_used = 0;
  2021 
  2022 int
  2023 mcheck (void (*func) (enum mcheck_status))
  2024 {
  2025   abortfunc = (func != NULL) ? func : &mabort;
  2026 
  2027   /* These hooks may not be safely inserted if malloc is already in use.  */
  2028   if (!__malloc_initialized && !mcheck_used)
  2029     {
  2030       old_free_hook = gfree_hook;
  2031       gfree_hook = freehook;
  2032       old_malloc_hook = gmalloc_hook;
  2033       gmalloc_hook = mallochook;
  2034       old_realloc_hook = grealloc_hook;
  2035       grealloc_hook = reallochook;
  2036       mcheck_used = 1;
  2037     }
  2038 
  2039   return mcheck_used ? 0 : -1;
  2040 }
  2041 
  2042 enum mcheck_status
  2043 mprobe (void *ptr)
  2044 {
  2045   /* PTR is the user pointer; the header to check sits just before it.  */
         return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
  2046 }
  2047 
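       /* Hypothetical usage sketch (not part of this file): mcheck must be
          called before the first allocation, since the hooks cannot be
          installed once malloc is in use; mprobe then checks any live
          block on demand.  */
       #if 0
       #include <stdlib.h>

       int
       main (void)
       {
         mcheck (NULL);       /* NULL means abort via mabort on errors.  */
         char *p = malloc (16);
         p[16] = 'x';         /* Clobber the trailing MAGICBYTE.  */
         mprobe (p);          /* Reports MCHECK_TAIL and calls mabort.  */
       }
       #endif
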
  2048 #endif /* GC_MCHECK */
