root/test/manual/etags/c-src/emacs/src/gmalloc.c

DEFINITIONS

This source file includes the following definitions.
  1. protect_malloc_state
  2. align
  3. get_contiguous_space
  4. register_heapinfo
  5. malloc_atfork_handler_prepare
  6. malloc_atfork_handler_parent
  7. malloc_atfork_handler_child
  8. malloc_enable_thread
  9. malloc_initialize_1
  10. __malloc_initialize
  11. morecore_nolock
  12. _malloc_internal_nolock
  13. _malloc_internal
  14. malloc
  15. _malloc
  16. _free
  17. _realloc
  18. _free_internal_nolock
  19. _free_internal
  20. free
  21. weak_alias
  22. _realloc_internal_nolock
  23. _realloc_internal
  24. realloc
  25. calloc
  26. __default_morecore
  27. aligned_alloc
  28. memalign
  29. posix_memalign
  30. valloc
  31. hybrid_malloc
  32. hybrid_calloc
  33. hybrid_free
  34. hybrid_aligned_alloc
  35. hybrid_realloc
  36. hybrid_get_current_dir_name
  37. checkhdr
  38. freehook
  39. mallochook
  40. reallochook
  41. mabort
  42. mcheck
  43. mprobe

/* Declarations for `malloc' and friends.
   Copyright (C) 1990-1993, 1995-1996, 1999, 2002-2007, 2013-2023 Free
   Software Foundation, Inc.
                  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <config.h>

#if defined HAVE_PTHREAD && !defined HYBRID_MALLOC
#define USE_PTHREAD
#endif

#include <string.h>
#include <limits.h>
#include <stdint.h>

#ifdef HYBRID_GET_CURRENT_DIR_NAME
#undef get_current_dir_name
#endif

#include <unistd.h>

#ifdef USE_PTHREAD
#include <pthread.h>
#endif

#ifdef WINDOWSNT
#include <w32heap.h>    /* for sbrk */
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif

/* If HYBRID_MALLOC is defined, then temacs will use malloc,
   realloc... as defined in this file (and renamed gmalloc,
   grealloc... via the macros that follow).  The dumped emacs,
   however, will use the system malloc, realloc....  In other source
   files, malloc, realloc... are renamed hybrid_malloc,
   hybrid_realloc... via macros in conf_post.h.  hybrid_malloc and
   friends are wrapper functions defined later in this file.
   aligned_alloc is defined as a macro only in alloc.c.

   As of this writing (August 2014), Cygwin is the only platform on
   which HYBRID_MALLOC is defined.  Any other platform that wants to
   define it will have to define the macros DUMPED and
   ALLOCATED_BEFORE_DUMPING, defined below for Cygwin.  */
#ifdef HYBRID_MALLOC
#undef malloc
#undef realloc
#undef calloc
#undef free
#define malloc gmalloc
#define realloc grealloc
#define calloc gcalloc
#define aligned_alloc galigned_alloc
#define free gfree
#endif  /* HYBRID_MALLOC */
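
/* With the renames above in effect, the definitions of malloc,
   realloc, calloc, aligned_alloc and free in the rest of this file
   really define gmalloc, grealloc, gcalloc, galigned_alloc and gfree;
   the hybrid_* wrappers defined later in the file choose between
   these and the system allocator.  */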

#ifdef CYGWIN
extern void *bss_sbrk (ptrdiff_t size);
extern int bss_sbrk_did_unexec;
extern char bss_sbrk_buffer[];
extern void *bss_sbrk_buffer_end;
#define DUMPED bss_sbrk_did_unexec
#define ALLOCATED_BEFORE_DUMPING(P) \
  ((P) < bss_sbrk_buffer_end && (P) >= (void *) bss_sbrk_buffer)
#endif

#ifdef  __cplusplus
extern "C"
{
#endif

#include <stddef.h>


/* Allocate SIZE bytes of memory.  */
extern void *malloc (size_t size) ATTRIBUTE_MALLOC_SIZE ((1));
/* Re-allocate the previously allocated block
   in ptr, making the new block SIZE bytes long.  */
extern void *realloc (void *ptr, size_t size) ATTRIBUTE_ALLOC_SIZE ((2));
/* Allocate NMEMB elements of SIZE bytes each, all initialized to 0.  */
extern void *calloc (size_t nmemb, size_t size) ATTRIBUTE_MALLOC_SIZE ((1,2));
/* Free a block allocated by `malloc', `realloc' or `calloc'.  */
extern void free (void *ptr);

/* Allocate SIZE bytes aligned to ALIGNMENT bytes.  */
#ifdef MSDOS
extern void *aligned_alloc (size_t, size_t);
extern void *memalign (size_t, size_t);
extern int posix_memalign (void **, size_t, size_t);
#endif

#ifdef USE_PTHREAD
/* Set up mutexes and make malloc etc. thread-safe.  */
extern void malloc_enable_thread (void);
#endif

#ifdef emacs
extern void emacs_abort (void);
#endif

/* The allocator divides the heap into blocks of fixed size; large
   requests receive one or more whole blocks, and small requests
   receive a fragment of a block.  Fragment sizes are powers of two,
   and all fragments of a block are the same size.  When all the
   fragments in a block have been freed, the block itself is freed.  */
#define INT_BIT         (CHAR_BIT * sizeof (int))
#define BLOCKLOG        (INT_BIT > 16 ? 12 : 9)
#define BLOCKSIZE       (1 << BLOCKLOG)
#define BLOCKIFY(SIZE)  (((SIZE) + BLOCKSIZE - 1) / BLOCKSIZE)
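
/* A worked example of the block arithmetic above, assuming 32-bit int:
   INT_BIT is 32, so BLOCKLOG is 12 and BLOCKSIZE is 4096.  BLOCKIFY
   rounds a request up to whole blocks:

       BLOCKIFY (1)    => 1
       BLOCKIFY (4096) => 1
       BLOCKIFY (4097) => 2  */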

/* Determine the amount of memory spanned by the initial heap table
   (not an absolute limit).  */
#define HEAP            (INT_BIT > 16 ? 4194304 : 65536)

/* Number of contiguous free blocks allowed to build up at the end of
   memory before they will be returned to the system.  */
#define FINAL_FREE_BLOCKS       8

/* Data structure giving per-block information.  */
typedef union
  {
    /* Heap information for a busy block.  */
    struct
      {
        /* Zero for a large (multiblock) object, or positive giving the
           logarithm to the base two of the fragment size.  */
        int type;
        union
          {
            struct
              {
                size_t nfree; /* Free frags in a fragmented block.  */
                size_t first; /* First free fragment of the block.  */
              } frag;
            /* For a large object, in its first block, this has the number
               of blocks in the object.  In the other blocks, this has a
               negative number which says how far back the first block is.  */
            ptrdiff_t size;
          } info;
      } busy;
    /* Heap information for a free block
       (that may be the first of a free cluster).  */
    struct
      {
        size_t size;    /* Size (in blocks) of a free cluster.  */
        size_t next;    /* Index of next free cluster.  */
        size_t prev;    /* Index of previous free cluster.  */
      } free;
  } malloc_info;
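
/* For example, a block carved into 128-byte fragments has
   busy.type == 7 (the base-two logarithm of 128), with busy.info.frag
   counting its free fragments, while a three-block object has
   busy.type == 0 and busy.info.size == 3 in its first block.  */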

/* Pointer to first block of the heap.  */
extern char *_heapbase;

/* Table indexed by block number giving per-block information.  */
extern malloc_info *_heapinfo;

/* Address to block number and vice versa.  */
#define BLOCK(A)        (((char *) (A) - _heapbase) / BLOCKSIZE + 1)
#define ADDRESS(B)      ((void *) (((B) - 1) * BLOCKSIZE + _heapbase))
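
/* Example of the mapping above, with BLOCKSIZE 4096: an object at
   _heapbase + 8192 lies in block (8192 / 4096) + 1 == 3, and
   ADDRESS (3) is _heapbase + 2 * 4096, the same spot.  Index 0 is
   never a real block; _heapinfo[0] anchors the free list.  */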

/* Current search index for the heap table.  */
extern size_t _heapindex;

/* Limit of valid info table indices.  */
extern size_t _heaplimit;

/* Doubly linked lists of free fragments.  */
struct list
  {
    struct list *next;
    struct list *prev;
  };

/* Free list headers for each fragment size.  */
extern struct list _fraghead[];

/* List of blocks allocated with aligned_alloc and friends.  */
struct alignlist
  {
    struct alignlist *next;
    void *aligned;              /* The address that aligned_alloc returned.  */
    void *exact;                /* The address that malloc returned.  */
  };
extern struct alignlist *_aligned_blocks;

/* Instrumentation.  */
extern size_t _chunks_used;
extern size_t _bytes_used;
extern size_t _chunks_free;
extern size_t _bytes_free;

/* Internal versions of `malloc', `realloc', and `free'
   used when these functions need to call each other.
   They are the same but don't call the hooks.  */
extern void *_malloc_internal (size_t);
extern void *_realloc_internal (void *, size_t);
extern void _free_internal (void *);
extern void *_malloc_internal_nolock (size_t);
extern void *_realloc_internal_nolock (void *, size_t);
extern void _free_internal_nolock (void *);

#ifdef USE_PTHREAD
extern pthread_mutex_t _malloc_mutex, _aligned_blocks_mutex;
extern int _malloc_thread_enabled_p;
#define LOCK()                                  \
  do {                                          \
    if (_malloc_thread_enabled_p)               \
      pthread_mutex_lock (&_malloc_mutex);      \
  } while (0)
#define UNLOCK()                                \
  do {                                          \
    if (_malloc_thread_enabled_p)               \
      pthread_mutex_unlock (&_malloc_mutex);    \
  } while (0)
#define LOCK_ALIGNED_BLOCKS()                           \
  do {                                                  \
    if (_malloc_thread_enabled_p)                       \
      pthread_mutex_lock (&_aligned_blocks_mutex);      \
  } while (0)
#define UNLOCK_ALIGNED_BLOCKS()                         \
  do {                                                  \
    if (_malloc_thread_enabled_p)                       \
      pthread_mutex_unlock (&_aligned_blocks_mutex);    \
  } while (0)
#else
#define LOCK()
#define UNLOCK()
#define LOCK_ALIGNED_BLOCKS()
#define UNLOCK_ALIGNED_BLOCKS()
#endif

/* Given an address in the middle of a malloc'd object,
   return the address of the beginning of the object.  */
extern void *malloc_find_object_address (void *ptr);

/* Underlying allocation function; successive calls should
   return contiguous pieces of memory.  */
extern void *(*__morecore) (ptrdiff_t size);

/* Default value of `__morecore'.  */
extern void *__default_morecore (ptrdiff_t size);

/* If not NULL, this function is called after each time
   `__morecore' is called to increase the data size.  */
extern void (*__after_morecore_hook) (void);

/* Number of extra blocks to get each time we ask for more core.
   This reduces the frequency of calling `(*__morecore)'.  */
extern size_t __malloc_extra_blocks;

/* Nonzero if `malloc' has been called and done its initialization.  */
extern int __malloc_initialized;
/* Function called to initialize malloc data structures.  */
extern int __malloc_initialize (void);

/* Hooks for debugging versions.  */
extern void (*__malloc_initialize_hook) (void);
extern void (*__free_hook) (void *ptr);
extern void *(*__malloc_hook) (size_t size);
extern void *(*__realloc_hook) (void *ptr, size_t size);
extern void *(*__memalign_hook) (size_t size, size_t alignment);

/* Return values for `mprobe': these are the kinds of inconsistencies that
   `mcheck' enables detection of.  */
enum mcheck_status
  {
    MCHECK_DISABLED = -1,       /* Consistency checking is not turned on.  */
    MCHECK_OK,                  /* Block is fine.  */
    MCHECK_FREE,                /* Block freed twice.  */
    MCHECK_HEAD,                /* Memory before the block was clobbered.  */
    MCHECK_TAIL                 /* Memory after the block was clobbered.  */
  };

/* Activate a standard collection of debugging hooks.  This must be called
   before `malloc' is ever called.  ABORTFUNC is called with an error code
   (see enum above) when an inconsistency is detected.  If ABORTFUNC is
   null, the standard function prints on stderr and then calls `abort'.  */
extern int mcheck (void (*abortfunc) (enum mcheck_status));

/* Check for aberrations in a particular malloc'd block.  You must have
   called `mcheck' already.  These are the same checks that `mcheck' does
   when you free or reallocate a block.  */
extern enum mcheck_status mprobe (void *ptr);
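
/* A usage sketch (assuming the conventional zero-on-success return
   from mcheck):

     if (mcheck (NULL) == 0)
       {
         char *p = malloc (16);
         ...
         if (mprobe (p) != MCHECK_OK)
           abort ();
       }
*/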

/* Activate a standard collection of tracing hooks.  */
extern void mtrace (void);
extern void muntrace (void);

/* Statistics available to the user.  */
struct mstats
  {
    size_t bytes_total; /* Total size of the heap. */
    size_t chunks_used; /* Chunks allocated by the user. */
    size_t bytes_used;  /* Byte total of user-allocated chunks. */
    size_t chunks_free; /* Chunks in the free list. */
    size_t bytes_free;  /* Byte total of chunks in the free list. */
  };

/* Pick up the current statistics. */
extern struct mstats mstats (void);

/* Call WARNFUN with a warning message when memory usage is high.  */
extern void memory_warnings (void *start, void (*warnfun) (const char *));

#ifdef  __cplusplus
}
#endif

/* Memory allocator `malloc'.
   Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
                  Written May 1989 by Mike Haertel.

This library is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
General Public License for more details.

You should have received a copy of the GNU General Public
License along with this library.  If not, see <https://www.gnu.org/licenses/>.

   The author may be reached (Email) at the address mike@ai.mit.edu,
   or (US mail) as Mike Haertel c/o Free Software Foundation.  */

#include <errno.h>

void *(*__morecore) (ptrdiff_t size) = __default_morecore;

/* Debugging hook for `malloc'.  */
void *(*__malloc_hook) (size_t size);

/* Pointer to the base of the first block.  */
char *_heapbase;

/* Block information table.  Allocated with align/__free (not malloc/free).  */
malloc_info *_heapinfo;

/* Number of info entries.  */
static size_t heapsize;

/* Search index in the info table.  */
size_t _heapindex;

/* Limit of valid info table indices.  */
size_t _heaplimit;

/* Free lists for each fragment size.  */
struct list _fraghead[BLOCKLOG];

/* Instrumentation.  */
size_t _chunks_used;
size_t _bytes_used;
size_t _chunks_free;
size_t _bytes_free;

/* Are you experienced?  */
int __malloc_initialized;

size_t __malloc_extra_blocks;

void (*__malloc_initialize_hook) (void);
void (*__after_morecore_hook) (void);

#if defined GC_MALLOC_CHECK && defined GC_PROTECT_MALLOC_STATE

/* Some code for hunting a bug writing into _heapinfo.

   Call this macro with argument PROT non-zero to protect internal
   malloc state against writing to it, call it with a zero argument to
   make it readable and writable.

   Note that this only works if BLOCKSIZE == page size, which is
   the case on the i386.  */

#include <sys/types.h>
#include <sys/mman.h>

static int state_protected_p;
static size_t last_state_size;
static malloc_info *last_heapinfo;

void
protect_malloc_state (int protect_p)
{
  /* If _heapinfo has been relocated, make sure its old location
     isn't left read-only; it will be reused by malloc.  */
  if (_heapinfo != last_heapinfo
      && last_heapinfo
      && state_protected_p)
    mprotect (last_heapinfo, last_state_size, PROT_READ | PROT_WRITE);

  last_state_size = _heaplimit * sizeof *_heapinfo;
  last_heapinfo   = _heapinfo;

  if (protect_p != state_protected_p)
    {
      state_protected_p = protect_p;
      if (mprotect (_heapinfo, last_state_size,
                    protect_p ? PROT_READ : PROT_READ | PROT_WRITE) != 0)
        abort ();
    }
}

#define PROTECT_MALLOC_STATE(PROT) protect_malloc_state (PROT)

#else
#define PROTECT_MALLOC_STATE(PROT)      /* empty */
#endif


/* Aligned allocation.  */
static void *
align (size_t size)
{
  void *result;
  ptrdiff_t adj;

  /* align accepts an unsigned argument, but __morecore accepts a
     signed one.  This could lead to trouble if SIZE overflows the
     ptrdiff_t type accepted by __morecore.  We just punt in that
     case, since they are requesting a ludicrous amount anyway.  */
  if (PTRDIFF_MAX < size)
    result = 0;
  else
    result = (*__morecore) (size);
  adj = (uintptr_t) result % BLOCKSIZE;
  if (adj != 0)
    {
      adj = BLOCKSIZE - adj;
      (*__morecore) (adj);
      result = (char *) result + adj;
    }

  if (__after_morecore_hook)
    (*__after_morecore_hook) ();

  return result;
}
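
/* Example of the adjustment above, with BLOCKSIZE 4096 (0x1000): if
   __morecore returns 0x21600, then adj is 0x600, so align asks
   __morecore for another 0xa00 bytes and returns 0x22000, the next
   block boundary.  */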

/* Get SIZE bytes, if we can get them starting at END.
   Return the address of the space we got.
   If we cannot get space at END, fail and return 0.  */
static void *
get_contiguous_space (ptrdiff_t size, void *position)
{
  void *before;
  void *after;

  before = (*__morecore) (0);
  /* If we can tell in advance that the break is at the wrong place,
     fail now.  */
  if (before != position)
    return 0;

  /* Allocate SIZE bytes and get the address of them.  */
  after = (*__morecore) (size);
  if (!after)
    return 0;

  /* It was not contiguous--reject it.  */
  if (after != position)
    {
      (*__morecore) (- size);
      return 0;
    }

  return after;
}


/* This is called when `_heapinfo' and `heapsize' have just
   been set to describe a new info table.  Set up the table
   to describe itself and account for it in the statistics.  */
static void
register_heapinfo (void)
{
  size_t block, blocks;

  block = BLOCK (_heapinfo);
  blocks = BLOCKIFY (heapsize * sizeof (malloc_info));

  /* Account for the _heapinfo block itself in the statistics.  */
  _bytes_used += blocks * BLOCKSIZE;
  ++_chunks_used;

  /* Describe the heapinfo block itself in the heapinfo.  */
  _heapinfo[block].busy.type = 0;
  _heapinfo[block].busy.info.size = blocks;
  /* Leave back-pointers for malloc_find_object_address.  */
  while (--blocks > 0)
    _heapinfo[block + blocks].busy.info.size = -blocks;
}
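
/* E.g. if the info table spans four blocks starting at block B, the
   loop above leaves busy.info.size as 4, -1, -2, -3 in blocks B
   through B + 3, so the first block can be recovered from any of the
   others by adding the stored (negative) adjustment.  */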

#ifdef USE_PTHREAD
pthread_mutex_t _malloc_mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t _aligned_blocks_mutex = PTHREAD_MUTEX_INITIALIZER;
int _malloc_thread_enabled_p;

static void
malloc_atfork_handler_prepare (void)
{
  LOCK ();
  LOCK_ALIGNED_BLOCKS ();
}

static void
malloc_atfork_handler_parent (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

static void
malloc_atfork_handler_child (void)
{
  UNLOCK_ALIGNED_BLOCKS ();
  UNLOCK ();
}

/* Set up mutexes and make malloc etc. thread-safe.  */
void
malloc_enable_thread (void)
{
  if (_malloc_thread_enabled_p)
    return;

  /* Some pthread implementations call malloc for statically
     initialized mutexes when they are used first.  To avoid such a
     situation, we initialize mutexes here while their use is
     disabled in malloc etc.  */
  pthread_mutex_init (&_malloc_mutex, NULL);
  pthread_mutex_init (&_aligned_blocks_mutex, NULL);
  pthread_atfork (malloc_atfork_handler_prepare,
                  malloc_atfork_handler_parent,
                  malloc_atfork_handler_child);
  _malloc_thread_enabled_p = 1;
}
#endif  /* USE_PTHREAD */

static void
malloc_initialize_1 (void)
{
#ifdef GC_MCHECK
  mcheck (NULL);
#endif

  if (__malloc_initialize_hook)
    (*__malloc_initialize_hook) ();

  heapsize = HEAP / BLOCKSIZE;
  _heapinfo = align (heapsize * sizeof (malloc_info));
  if (_heapinfo == NULL)
    return;
  memset (_heapinfo, 0, heapsize * sizeof (malloc_info));
  _heapinfo[0].free.size = 0;
  _heapinfo[0].free.next = _heapinfo[0].free.prev = 0;
  _heapindex = 0;
  _heapbase = (char *) _heapinfo;
  _heaplimit = BLOCK (_heapbase + heapsize * sizeof (malloc_info));

  register_heapinfo ();

  __malloc_initialized = 1;
  PROTECT_MALLOC_STATE (1);
  return;
}

/* Set everything up and remember that we have.
   main will call malloc which calls this function.  That is before any threads
   or signal handlers have been set up, so we don't need thread protection.  */
int
__malloc_initialize (void)
{
  if (__malloc_initialized)
    return 0;

  malloc_initialize_1 ();

  return __malloc_initialized;
}

static int morecore_recursing;

/* Get neatly aligned memory, initializing or
   growing the heap info table as necessary. */
static void *
morecore_nolock (size_t size)
{
  void *result;
  malloc_info *newinfo, *oldinfo;
  size_t newsize;

  if (morecore_recursing)
    /* Avoid recursion.  The caller will know how to handle a null return.  */
    return NULL;

  result = align (size);
  if (result == NULL)
    return NULL;

  PROTECT_MALLOC_STATE (0);

  /* Check if we need to grow the info table.  */
  if ((size_t) BLOCK ((char *) result + size) > heapsize)
    {
      /* Calculate the new _heapinfo table size.  We do not account for the
         added blocks in the table itself, as we hope to place them in
         existing free space, which is already covered by part of the
         existing table.  */
      newsize = heapsize;
      do
        newsize *= 2;
      while ((size_t) BLOCK ((char *) result + size) > newsize);

      /* We must not reuse existing core for the new info table when called
         from realloc in the case of growing a large block, because the
         block being grown is momentarily marked as free.  In this case
         _heaplimit is zero so we know not to reuse space for internal
         allocation.  */
      if (_heaplimit != 0)
        {
          /* First try to allocate the new info table in core we already
             have, in the usual way using realloc.  If realloc cannot
             extend it in place or relocate it to existing sufficient core,
             we will get called again, and the code above will notice the
             `morecore_recursing' flag and return null.  */
          int save = errno;     /* Don't want to clobber errno with ENOMEM.  */
          morecore_recursing = 1;
          newinfo = _realloc_internal_nolock (_heapinfo,
                                              newsize * sizeof (malloc_info));
          morecore_recursing = 0;
          if (newinfo == NULL)
            errno = save;
          else
            {
              /* We found some space in core, and realloc has put the old
                 table's blocks on the free list.  Now zero the new part
                 of the table and install the new table location.  */
              memset (&newinfo[heapsize], 0,
                      (newsize - heapsize) * sizeof (malloc_info));
              _heapinfo = newinfo;
              heapsize = newsize;
              goto got_heap;
            }
        }

      /* Allocate new space for the malloc info table.  */
      while (1)
        {
          newinfo = align (newsize * sizeof (malloc_info));

          /* Did it fail?  */
          if (newinfo == NULL)
            {
              (*__morecore) (-size);
              return NULL;
            }

          /* Is it big enough to record status for its own space?
             If so, we win.  */
          if ((size_t) BLOCK ((char *) newinfo
                              + newsize * sizeof (malloc_info))
              < newsize)
            break;

          /* Must try again.  First give back most of what we just got.  */
          (*__morecore) (- newsize * sizeof (malloc_info));
          newsize *= 2;
        }

      /* Copy the old table to the beginning of the new,
         and zero the rest of the new table.  */
      memcpy (newinfo, _heapinfo, heapsize * sizeof (malloc_info));
      memset (&newinfo[heapsize], 0,
              (newsize - heapsize) * sizeof (malloc_info));
      oldinfo = _heapinfo;
      _heapinfo = newinfo;
      heapsize = newsize;

      register_heapinfo ();

      /* Reset _heaplimit so _free_internal never decides
         it can relocate or resize the info table.  */
      _heaplimit = 0;
      _free_internal_nolock (oldinfo);
      PROTECT_MALLOC_STATE (0);

      /* The new heap limit includes the new table just allocated.  */
      _heaplimit = BLOCK ((char *) newinfo + heapsize * sizeof (malloc_info));
      return result;
    }

 got_heap:
  _heaplimit = BLOCK ((char *) result + size);
  return result;
}

/* Allocate memory from the heap.  */
void *
_malloc_internal_nolock (size_t size)
{
  void *result;
  size_t block, blocks, lastblocks, start;
  register size_t i;
  struct list *next;

  /* ANSI C allows `malloc (0)' to either return NULL, or to return a
     valid address you can realloc and free (though not dereference).

     It turns out that some extant code (sunrpc, at least Ultrix's version)
     expects `malloc (0)' to return non-NULL and breaks otherwise.
     Be compatible.  */

#if     0
  if (size == 0)
    return NULL;
#endif

  PROTECT_MALLOC_STATE (0);

  if (size < sizeof (struct list))
    size = sizeof (struct list);

  /* Determine the allocation policy based on the request size.  */
  if (size <= BLOCKSIZE / 2)
    {
      /* Small allocation to receive a fragment of a block.
         Determine the logarithm to base two of the fragment size. */
      register size_t log = 1;
      --size;
      while ((size /= 2) != 0)
        ++log;
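
      /* For instance, a 100-byte request decrements size to 99 and
         then halves it to 49, 24, 12, 6, 3, 1, leaving log = 7, so
         the request is served from a 1 << 7 = 128-byte fragment.  */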

      /* Look in the fragment lists for a
         free fragment of the desired size. */
      next = _fraghead[log].next;
      if (next != NULL)
        {
          /* There are free fragments of this size.
             Pop a fragment out of the fragment list and return it.
             Update the block's nfree and first counters. */
          result = next;
          next->prev->next = next->next;
          if (next->next != NULL)
            next->next->prev = next->prev;
          block = BLOCK (result);
          if (--_heapinfo[block].busy.info.frag.nfree != 0)
            _heapinfo[block].busy.info.frag.first =
              (uintptr_t) next->next % BLOCKSIZE >> log;

          /* Update the statistics.  */
          ++_chunks_used;
          _bytes_used += 1 << log;
          --_chunks_free;
          _bytes_free -= 1 << log;
        }
      else
        {
          /* No free fragments of the desired size, so get a new block
             and break it into fragments, returning the first.  */
#ifdef GC_MALLOC_CHECK
          result = _malloc_internal_nolock (BLOCKSIZE);
          PROTECT_MALLOC_STATE (0);
#elif defined (USE_PTHREAD)
          result = _malloc_internal_nolock (BLOCKSIZE);
#else
          result = malloc (BLOCKSIZE);
#endif
          if (result == NULL)
            {
              PROTECT_MALLOC_STATE (1);
              goto out;
            }

          /* Link all fragments but the first into the free list.  */
          next = (struct list *) ((char *) result + (1 << log));
          next->next = NULL;
          next->prev = &_fraghead[log];
          _fraghead[log].next = next;

          for (i = 2; i < (size_t) (BLOCKSIZE >> log); ++i)
            {
              next = (struct list *) ((char *) result + (i << log));
              next->next = _fraghead[log].next;
              next->prev = &_fraghead[log];
              next->prev->next = next;
              next->next->prev = next;
            }

          /* Initialize the nfree and first counters for this block.  */
          block = BLOCK (result);
          _heapinfo[block].busy.type = log;
          _heapinfo[block].busy.info.frag.nfree = i - 1;
          _heapinfo[block].busy.info.frag.first = i - 1;

          _chunks_free += (BLOCKSIZE >> log) - 1;
          _bytes_free += BLOCKSIZE - (1 << log);
          _bytes_used -= BLOCKSIZE - (1 << log);
        }
    }
  else
    {
      /* Large allocation to receive one or more blocks.
         Search the free list in a circle starting at the last place visited.
         If we loop completely around without finding a large enough
         space we will have to get more memory from the system.  */
      blocks = BLOCKIFY (size);
      start = block = _heapindex;
      while (_heapinfo[block].free.size < blocks)
        {
          block = _heapinfo[block].free.next;
          if (block == start)
            {
              /* Need to get more from the system.  Get a little extra.  */
              size_t wantblocks = blocks + __malloc_extra_blocks;
              block = _heapinfo[0].free.prev;
              lastblocks = _heapinfo[block].free.size;
              /* Check to see if the new core will be contiguous with the
                 final free block; if so we don't need to get as much.  */
              if (_heaplimit != 0 && block + lastblocks == _heaplimit &&
                  /* We can't do this if we will have to make the heap info
                     table bigger to accommodate the new space.  */
                  block + wantblocks <= heapsize &&
                  get_contiguous_space ((wantblocks - lastblocks) * BLOCKSIZE,
                                        ADDRESS (block + lastblocks)))
                {
                  /* We got it contiguously.  Which block we are extending
                     (the `final free block' referred to above) might have
                     changed, if it got combined with a freed info table.  */
                  block = _heapinfo[0].free.prev;
                  _heapinfo[block].free.size += (wantblocks - lastblocks);
                  _bytes_free += (wantblocks - lastblocks) * BLOCKSIZE;
                  _heaplimit += wantblocks - lastblocks;
                  continue;
                }
              result = morecore_nolock (wantblocks * BLOCKSIZE);
              if (result == NULL)
                goto out;
              block = BLOCK (result);
              /* Put the new block at the end of the free list.  */
              _heapinfo[block].free.size = wantblocks;
              _heapinfo[block].free.prev = _heapinfo[0].free.prev;
              _heapinfo[block].free.next = 0;
              _heapinfo[0].free.prev = block;
              _heapinfo[_heapinfo[block].free.prev].free.next = block;
              ++_chunks_free;
              /* Now loop to use some of that block for this allocation.  */
            }
        }

      /* At this point we have found a suitable free list entry.
         Figure out how to remove what we need from the list. */
      result = ADDRESS (block);
      if (_heapinfo[block].free.size > blocks)
        {
          /* The block we found has a bit left over,
             so relink the tail end back into the free list. */
          _heapinfo[block + blocks].free.size
            = _heapinfo[block].free.size - blocks;
          _heapinfo[block + blocks].free.next
            = _heapinfo[block].free.next;
          _heapinfo[block + blocks].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapindex = block + blocks;
        }
      else
        {
          /* The block exactly matches our requirements,
             so just remove it from the list. */
          _heapinfo[_heapinfo[block].free.next].free.prev
            = _heapinfo[block].free.prev;
          _heapinfo[_heapinfo[block].free.prev].free.next
            = _heapindex = _heapinfo[block].free.next;
          --_chunks_free;
        }

      _heapinfo[block].busy.type = 0;
      _heapinfo[block].busy.info.size = blocks;
      ++_chunks_used;
      _bytes_used += blocks * BLOCKSIZE;
      _bytes_free -= blocks * BLOCKSIZE;

      /* Mark all the blocks of the object just allocated except for the
         first with a negative number so you can find the first block by
         adding that adjustment.  */
      while (--blocks > 0)
        _heapinfo[block + blocks].busy.info.size = -blocks;
    }

  PROTECT_MALLOC_STATE (1);
 out:
  return result;
}

void *
_malloc_internal (size_t size)
{
  void *result;

  LOCK ();
  result = _malloc_internal_nolock (size);
  UNLOCK ();

  return result;
}

void *
malloc (size_t size)
{
  void *(*hook) (size_t);

  if (!__malloc_initialized && !__malloc_initialize ())
    return NULL;

  /* Copy the value of __malloc_hook to an automatic variable in case
     __malloc_hook is modified in another thread between its
     NULL-check and the use.

     Note: Strictly speaking, this is not a right solution.  We should
     use mutexes to access non-read-only variables that are shared
     among multiple threads.  We just leave it for compatibility with
     glibc malloc (i.e., assignments to __malloc_hook) for now.  */
  hook = __malloc_hook;
  return (hook != NULL ? *hook : _malloc_internal) (size);
}
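
/* A sketch of installing a debugging hook (an assumed example, not
   part of this file): a hook must reset itself around its own call
   to malloc so the recursion terminates.

     static void *
     tracing_malloc_hook (size_t size)
     {
       void *result;
       __malloc_hook = NULL;
       result = malloc (size);
       __malloc_hook = tracing_malloc_hook;
       return result;
     }
*/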
   950 
   951 #ifndef _LIBC
   952 
   953 /* On some ANSI C systems, some libc functions call _malloc, _free
   954    and _realloc.  Make them use the GNU functions.  */
   955 
   956 extern void *_malloc (size_t);
   957 extern void _free (void *);
   958 extern void *_realloc (void *, size_t);
   959 
   960 void *
   961 _malloc (size_t size)
   962 {
   963   return malloc (size);
   964 }
   965 
   966 void
   967 _free (void *ptr)
   968 {
   969   free (ptr);
   970 }
   971 
   972 void *
   973 _realloc (void *ptr, size_t size)
   974 {
   975   return realloc (ptr, size);
   976 }
   977 
   978 #endif
   979 /* Free a block of memory allocated by `malloc'.
   980    Copyright 1990, 1991, 1992, 1994, 1995 Free Software Foundation, Inc.
   981                   Written May 1989 by Mike Haertel.
   982 
   983 This library is free software; you can redistribute it and/or
   984 modify it under the terms of the GNU General Public License as
   985 published by the Free Software Foundation; either version 2 of the
   986 License, or (at your option) any later version.
   987 
   988 This library is distributed in the hope that it will be useful,
   989 but WITHOUT ANY WARRANTY; without even the implied warranty of
   990 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   991 General Public License for more details.
   992 
   993 You should have received a copy of the GNU General Public
   994 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
   995 
   996    The author may be reached (Email) at the address mike@ai.mit.edu,
   997    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
   998 
   999 
  1000 /* Debugging hook for free.  */
  1001 void (*__free_hook) (void *__ptr);
  1002 
  1003 /* List of blocks allocated by aligned_alloc.  */
  1004 struct alignlist *_aligned_blocks = NULL;
  1005 
  1006 /* Return memory to the heap.
  1007    Like `_free_internal' but don't lock mutex.  */
  1008 void
  1009 _free_internal_nolock (void *ptr)
  1010 {
  1011   int type;
  1012   size_t block, blocks;
  1013   register size_t i;
  1014   struct list *prev, *next;
  1015   void *curbrk;
  1016   const size_t lesscore_threshold
  1017     /* Threshold of free space at which we will return some to the system.  */
  1018     = FINAL_FREE_BLOCKS + 2 * __malloc_extra_blocks;
  1019 
  1020   register struct alignlist *l;
  1021 
  1022   if (ptr == NULL)
  1023     return;
  1024 
  1025   PROTECT_MALLOC_STATE (0);
  1026 
  1027   LOCK_ALIGNED_BLOCKS ();
  1028   for (l = _aligned_blocks; l != NULL; l = l->next)
  1029     if (l->aligned == ptr)
  1030       {
  1031         l->aligned = NULL;      /* Mark the slot in the list as free.  */
  1032         ptr = l->exact;
  1033         break;
  1034       }
  1035   UNLOCK_ALIGNED_BLOCKS ();
  1036 
  1037   block = BLOCK (ptr);
  1038 
  1039   type = _heapinfo[block].busy.type;
  1040   switch (type)
  1041     {
  1042     case 0:
  1043       /* Get as many statistics as early as we can.  */
  1044       --_chunks_used;
  1045       _bytes_used -= _heapinfo[block].busy.info.size * BLOCKSIZE;
  1046       _bytes_free += _heapinfo[block].busy.info.size * BLOCKSIZE;
  1047 
  1048       /* Find the free cluster previous to this one in the free list.
  1049          Start searching at the last block referenced; this may benefit
  1050          programs with locality of allocation.  */
  1051       i = _heapindex;
  1052       if (i > block)
  1053         while (i > block)
  1054           i = _heapinfo[i].free.prev;
  1055       else
  1056         {
  1057           do
  1058             i = _heapinfo[i].free.next;
  1059           while (i > 0 && i < block);
  1060           i = _heapinfo[i].free.prev;
  1061         }
  1062 
  1063       /* Determine how to link this block into the free list.  */
  1064       if (block == i + _heapinfo[i].free.size)
  1065         {
  1066           /* Coalesce this block with its predecessor.  */
  1067           _heapinfo[i].free.size += _heapinfo[block].busy.info.size;
  1068           block = i;
  1069         }
  1070       else
  1071         {
  1072           /* Really link this block back into the free list.  */
  1073           _heapinfo[block].free.size = _heapinfo[block].busy.info.size;
  1074           _heapinfo[block].free.next = _heapinfo[i].free.next;
  1075           _heapinfo[block].free.prev = i;
  1076           _heapinfo[i].free.next = block;
  1077           _heapinfo[_heapinfo[block].free.next].free.prev = block;
  1078           ++_chunks_free;
  1079         }
  1080 
  1081       /* Now that the block is linked in, see if we can coalesce it
  1082          with its successor (by deleting its successor from the list
  1083          and adding in its size).  */
  1084       if (block + _heapinfo[block].free.size == _heapinfo[block].free.next)
  1085         {
  1086           _heapinfo[block].free.size
  1087             += _heapinfo[_heapinfo[block].free.next].free.size;
  1088           _heapinfo[block].free.next
  1089             = _heapinfo[_heapinfo[block].free.next].free.next;
  1090           _heapinfo[_heapinfo[block].free.next].free.prev = block;
  1091           --_chunks_free;
  1092         }
  1093 
  1094       /* How many trailing free blocks are there now?  */
  1095       blocks = _heapinfo[block].free.size;
  1096 
  1097       /* Where is the current end of accessible core?  */
  1098       curbrk = (*__morecore) (0);
  1099 
  1100       if (_heaplimit != 0 && curbrk == ADDRESS (_heaplimit))
  1101         {
  1102           /* The end of the malloc heap is at the end of accessible core.
  1103              It's possible that moving _heapinfo will allow us to
  1104              return some space to the system.  */
  1105 
  1106           size_t info_block = BLOCK (_heapinfo);
  1107           size_t info_blocks = _heapinfo[info_block].busy.info.size;
  1108           size_t prev_block = _heapinfo[block].free.prev;
  1109           size_t prev_blocks = _heapinfo[prev_block].free.size;
  1110           size_t next_block = _heapinfo[block].free.next;
  1111           size_t next_blocks = _heapinfo[next_block].free.size;
  1112 
  1113           if (/* Win if this block being freed is last in core, the info table
  1114                  is just before it, the previous free block is just before the
  1115                  info table, and the two free blocks together form a useful
  1116                  amount to return to the system.  */
  1117               (block + blocks == _heaplimit &&
  1118                info_block + info_blocks == block &&
  1119                prev_block != 0 && prev_block + prev_blocks == info_block &&
  1120                blocks + prev_blocks >= lesscore_threshold) ||
  1121               /* Nope, not the case.  We can also win if this block being
  1122                  freed is just before the info table, and the table extends
  1123                  to the end of core or is followed only by a free block,
  1124                  and the total free space is worth returning to the system.  */
  1125               (block + blocks == info_block &&
  1126                ((info_block + info_blocks == _heaplimit &&
  1127                  blocks >= lesscore_threshold) ||
  1128                 (info_block + info_blocks == next_block &&
  1129                  next_block + next_blocks == _heaplimit &&
  1130                  blocks + next_blocks >= lesscore_threshold)))
  1131               )
  1132             {
  1133               malloc_info *newinfo;
  1134               size_t oldlimit = _heaplimit;
  1135 
  1136               /* Free the old info table, clearing _heaplimit to avoid
  1137                  recursion into this code.  We don't want to return the
  1138                  table's blocks to the system before we have copied them to
  1139                  the new location.  */
  1140               _heaplimit = 0;
  1141               _free_internal_nolock (_heapinfo);
  1142               _heaplimit = oldlimit;
  1143 
  1144               /* Tell malloc to search from the beginning of the heap for
  1145                  free blocks, so it doesn't reuse the ones just freed.  */
  1146               _heapindex = 0;
  1147 
  1148               /* Allocate new space for the info table and move its data.  */
  1149               newinfo = _malloc_internal_nolock (info_blocks * BLOCKSIZE);
  1150               PROTECT_MALLOC_STATE (0);
  1151               memmove (newinfo, _heapinfo, info_blocks * BLOCKSIZE);
  1152               _heapinfo = newinfo;
  1153 
  1154               /* We should now have coalesced the free block with the
  1155                  blocks freed from the old info table.  Examine the entire
  1156                  trailing free block to decide below whether to return some
  1157                  to the system.  */
  1158               block = _heapinfo[0].free.prev;
  1159               blocks = _heapinfo[block].free.size;
  1160             }
  1161 
  1162           /* Now see if we can return stuff to the system.  */
  1163           if (block + blocks == _heaplimit && blocks >= lesscore_threshold)
  1164             {
  1165               register size_t bytes = blocks * BLOCKSIZE;
  1166               _heaplimit -= blocks;
  1167               (*__morecore) (-bytes);
  1168               _heapinfo[_heapinfo[block].free.prev].free.next
  1169                 = _heapinfo[block].free.next;
  1170               _heapinfo[_heapinfo[block].free.next].free.prev
  1171                 = _heapinfo[block].free.prev;
  1172               block = _heapinfo[block].free.prev;
  1173               --_chunks_free;
  1174               _bytes_free -= bytes;
  1175             }
  1176         }
  1177 
  1178       /* Set the next search to begin at this block.  */
  1179       _heapindex = block;
  1180       break;
  1181 
  1182     default:
  1183       /* Do some of the statistics.  */
  1184       --_chunks_used;
  1185       _bytes_used -= 1 << type;
  1186       ++_chunks_free;
  1187       _bytes_free += 1 << type;
  1188 
  1189       /* Get the address of the first free fragment in this block.  */
  1190       prev = (struct list *) ((char *) ADDRESS (block) +
  1191                               (_heapinfo[block].busy.info.frag.first << type));
  1192 
  1193       if (_heapinfo[block].busy.info.frag.nfree == (BLOCKSIZE >> type) - 1)
  1194         {
  1195           /* If all fragments of this block are free, remove them
  1196              from the fragment list and free the whole block.  */
  1197           next = prev;
  1198           for (i = 1; i < (size_t) (BLOCKSIZE >> type); ++i)
  1199             next = next->next;
  1200           prev->prev->next = next;
  1201           if (next != NULL)
  1202             next->prev = prev->prev;
  1203           _heapinfo[block].busy.type = 0;
  1204           _heapinfo[block].busy.info.size = 1;
  1205 
  1206           /* Keep the statistics accurate.  */
  1207           ++_chunks_used;
  1208           _bytes_used += BLOCKSIZE;
  1209           _chunks_free -= BLOCKSIZE >> type;
  1210           _bytes_free -= BLOCKSIZE;
  1211 
  1212 #if defined (GC_MALLOC_CHECK) || defined (USE_PTHREAD)
  1213           _free_internal_nolock (ADDRESS (block));
  1214 #else
  1215           free (ADDRESS (block));
  1216 #endif
  1217         }
  1218       else if (_heapinfo[block].busy.info.frag.nfree != 0)
  1219         {
  1220           /* If some fragments of this block are free, link this
  1221              fragment into the fragment list after the first free
  1222              fragment of this block. */
  1223           next = ptr;
  1224           next->next = prev->next;
  1225           next->prev = prev;
  1226           prev->next = next;
  1227           if (next->next != NULL)
  1228             next->next->prev = next;
  1229           ++_heapinfo[block].busy.info.frag.nfree;
  1230         }
  1231       else
  1232         {
  1233           /* No fragments of this block are free, so link this
  1234              fragment into the fragment list and announce that
  1235              it is the first free fragment of this block. */
  1236           prev = ptr;
  1237           _heapinfo[block].busy.info.frag.nfree = 1;
  1238           _heapinfo[block].busy.info.frag.first =
  1239             (uintptr_t) ptr % BLOCKSIZE >> type;
  1240           prev->next = _fraghead[type].next;
  1241           prev->prev = &_fraghead[type];
  1242           prev->prev->next = prev;
  1243           if (prev->next != NULL)
  1244             prev->next->prev = prev;
  1245         }
  1246       break;
  1247     }
  1248 
  1249   PROTECT_MALLOC_STATE (1);
  1250 }
  1251 
  1252 /* Return memory to the heap.
  1253    Like `free' but don't call a __free_hook if there is one.  */
  1254 void
  1255 _free_internal (void *ptr)
  1256 {
  1257   LOCK ();
  1258   _free_internal_nolock (ptr);
  1259   UNLOCK ();
  1260 }
  1261 
  1262 /* Return memory to the heap.  */
  1263 
  1264 void
  1265 free (void *ptr)
  1266 {
  1267   void (*hook) (void *) = __free_hook;
  1268 
  1269   if (hook != NULL)
  1270     (*hook) (ptr);
  1271   else
  1272     _free_internal (ptr);
  1273 }
  1274 
  1275 /* Define the `cfree' alias for `free'.  */
  1276 #ifdef weak_alias
  1277 weak_alias (free, cfree)
  1278 #else
  1279 void
  1280 cfree (void *ptr)
  1281 {
  1282   free (ptr);
  1283 }
  1284 #endif
  1285 /* Change the size of a block allocated by `malloc'.
  1286    Copyright 1990, 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
  1287                      Written May 1989 by Mike Haertel.
  1288 
  1289 This library is free software; you can redistribute it and/or
  1290 modify it under the terms of the GNU General Public License as
  1291 published by the Free Software Foundation; either version 2 of the
  1292 License, or (at your option) any later version.
  1293 
  1294 This library is distributed in the hope that it will be useful,
  1295 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1296 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1297 General Public License for more details.
  1298 
  1299 You should have received a copy of the GNU General Public
  1300 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1301 
  1302    The author may be reached (Email) at the address mike@ai.mit.edu,
  1303    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1304 
  1305 #ifndef min
  1306 #define min(a, b) ((a) < (b) ? (a) : (b))
  1307 #endif
  1308 
  1309 /* Debugging hook for realloc.  */
  1310 void *(*__realloc_hook) (void *ptr, size_t size);
  1311 
  1312 /* Resize the given region to the new size, returning a pointer
  1313    to the (possibly moved) region.  This is optimized for speed;
  1314    some benchmarks seem to indicate that greater compactness is
  1315    achieved by unconditionally allocating and copying to a
  1316    new region.  This module has incestuous knowledge of the
  1317    internals of both free and malloc. */
  1318 void *
  1319 _realloc_internal_nolock (void *ptr, size_t size)
  1320 {
  1321   void *result;
  1322   int type;
  1323   size_t block, blocks, oldlimit;
  1324 
  1325   if (size == 0)
  1326     {
  1327       _free_internal_nolock (ptr);
  1328       return _malloc_internal_nolock (0);
  1329     }
  1330   else if (ptr == NULL)
  1331     return _malloc_internal_nolock (size);
  1332 
  1333   block = BLOCK (ptr);
  1334 
  1335   PROTECT_MALLOC_STATE (0);
  1336 
  1337   type = _heapinfo[block].busy.type;
  1338   switch (type)
  1339     {
  1340     case 0:
  1341       /* Maybe reallocate a large block to a small fragment.  */
  1342       if (size <= BLOCKSIZE / 2)
  1343         {
  1344           result = _malloc_internal_nolock (size);
  1345           if (result != NULL)
  1346             {
  1347               memcpy (result, ptr, size);
  1348               _free_internal_nolock (ptr);
  1349               goto out;
  1350             }
  1351         }
  1352 
  1353       /* The new size is a large allocation as well;
  1354          see if we can hold it in place. */
  1355       blocks = BLOCKIFY (size);
  1356       if (blocks < _heapinfo[block].busy.info.size)
  1357         {
  1358           /* The new size is smaller; return
  1359              excess memory to the free list. */
  1360           _heapinfo[block + blocks].busy.type = 0;
  1361           _heapinfo[block + blocks].busy.info.size
  1362             = _heapinfo[block].busy.info.size - blocks;
  1363           _heapinfo[block].busy.info.size = blocks;
  1364           /* We have just created a new chunk by splitting a chunk in two.
  1365              Now we will free this chunk; increment the statistics counter
  1366              so it doesn't become wrong when _free_internal decrements it.  */
  1367           ++_chunks_used;
  1368           _free_internal_nolock (ADDRESS (block + blocks));
  1369           result = ptr;
  1370         }
  1371       else if (blocks == _heapinfo[block].busy.info.size)
  1372         /* No size change necessary.  */
  1373         result = ptr;
  1374       else
  1375         {
  1376           /* Won't fit, so allocate a new region that will.
  1377              Free the old region first in case there is sufficient
  1378              adjacent free space to grow without moving. */
  1379           blocks = _heapinfo[block].busy.info.size;
  1380           /* Prevent free from actually returning memory to the system.  */
  1381           oldlimit = _heaplimit;
  1382           _heaplimit = 0;
  1383           _free_internal_nolock (ptr);
  1384           result = _malloc_internal_nolock (size);
  1385           PROTECT_MALLOC_STATE (0);
  1386           if (_heaplimit == 0)
  1387             _heaplimit = oldlimit;
  1388           if (result == NULL)
  1389             {
  1390               /* Now we're really in trouble.  We have to unfree
  1391                  the thing we just freed.  Unfortunately it might
  1392                  have been coalesced with its neighbors.  */
  1393               if (_heapindex == block)
  1394                 (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
  1395               else
  1396                 {
  1397                   void *previous
  1398                     = _malloc_internal_nolock ((block - _heapindex) * BLOCKSIZE);
  1399                   (void) _malloc_internal_nolock (blocks * BLOCKSIZE);
  1400                   _free_internal_nolock (previous);
  1401                 }
  1402               goto out;
  1403             }
  1404           if (ptr != result)
  1405             memmove (result, ptr, blocks * BLOCKSIZE);
  1406         }
  1407       break;
  1408 
  1409     default:
  1410       /* Old size is a fragment; type is logarithm
  1411          to base two of the fragment size.  */
  1412       if (size > (size_t) (1 << (type - 1)) &&
  1413           size <= (size_t) (1 << type))
  1414         /* The new size is the same kind of fragment.  */
  1415         result = ptr;
  1416       else
  1417         {
  1418           /* The new size is different; allocate a new space,
  1419              and copy the lesser of the new size and the old. */
  1420           result = _malloc_internal_nolock (size);
  1421           if (result == NULL)
  1422             goto out;
  1423           memcpy (result, ptr, min (size, (size_t) 1 << type));
  1424           _free_internal_nolock (ptr);
  1425         }
  1426       break;
  1427     }
  1428 
  1429   PROTECT_MALLOC_STATE (1);
  1430  out:
  1431   return result;
  1432 }
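
       /* A worked example of the shrink-in-place case above (hypothetical
          numbers, assuming BLOCKSIZE is 4096): a 5-block region starting
          at block 10 is realloc'd down to 8000 bytes, so BLOCKIFY (8000)
          yields 2 blocks and the code in effect performs:

            _heapinfo[12].busy.type = 0;          // split point: block 10 + 2
            _heapinfo[12].busy.info.size = 3;     // the 3 blocks given back
            _heapinfo[10].busy.info.size = 2;     // the 2 blocks kept
            ++_chunks_used;                       // pre-balance the counter
            _free_internal_nolock (ADDRESS (12)); // free blocks 12..14

          The increment of _chunks_used exists only so the decrement inside
          _free_internal_nolock leaves the statistics unchanged.  */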
  1433 
  1434 void *
  1435 _realloc_internal (void *ptr, size_t size)
  1436 {
  1437   void *result;
  1438 
  1439   LOCK ();
  1440   result = _realloc_internal_nolock (ptr, size);
  1441   UNLOCK ();
  1442 
  1443   return result;
  1444 }
  1445 
  1446 void *
  1447 realloc (void *ptr, size_t size)
  1448 {
  1449   void *(*hook) (void *, size_t);
  1450 
  1451   if (!__malloc_initialized && !__malloc_initialize ())
  1452     return NULL;
  1453 
  1454   hook = __realloc_hook;
  1455   return (hook != NULL ? *hook : _realloc_internal) (ptr, size);
  1456 }
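
       /* A minimal sketch of interposing on realloc through the hook; the
          names saved_hook and my_realloc_hook are hypothetical, not part
          of this file.  The save/restore dance mirrors reallochook below
          and keeps the hook from recursing into itself:

            static void *(*saved_hook) (void *, size_t);

            static void *
            my_realloc_hook (void *ptr, size_t size)
            {
              void *r;
              __realloc_hook = saved_hook;       // disarm while we work
              r = realloc (ptr, size);           // reaches _realloc_internal
              saved_hook = __realloc_hook;
              __realloc_hook = my_realloc_hook;  // re-arm
              return r;
            }

          Installation:
            saved_hook = __realloc_hook;
            __realloc_hook = my_realloc_hook;  */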
  1457 /* Copyright (C) 1991, 1992, 1994 Free Software Foundation, Inc.
  1458 
  1459 This library is free software; you can redistribute it and/or
  1460 modify it under the terms of the GNU General Public License as
  1461 published by the Free Software Foundation; either version 2 of the
  1462 License, or (at your option) any later version.
  1463 
  1464 This library is distributed in the hope that it will be useful,
  1465 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1466 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1467 General Public License for more details.
  1468 
  1469 You should have received a copy of the GNU General Public
  1470 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1471 
  1472    The author may be reached (Email) at the address mike@ai.mit.edu,
  1473    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1474 
  1475 /* Allocate an array of NMEMB elements each SIZE bytes long.
  1476    The entire array is initialized to zeros.  */
  1477 void *
  1478 calloc (size_t nmemb, size_t size)
  1479 {
  1480   void *result;
  1481   size_t bytes = nmemb * size;
  1482 
  1483   if (size != 0 && bytes / size != nmemb)
  1484     {
  1485       errno = ENOMEM;
  1486       return NULL;
  1487     }
  1488 
  1489   result = malloc (bytes);
  1490   if (result)
  1491     return memset (result, 0, bytes);
  1492   return result;
  1493 }
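
       /* Why the division above catches overflow (a worked example with a
          hypothetical 32-bit size_t): nmemb = 0x10000 and size = 0x10001
          multiply to 0x100010000, which wraps to bytes = 0x10000; then
          bytes / size == 0 != nmemb, so calloc reports ENOMEM instead of
          returning a block far smaller than the caller asked for.  */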
  1494 /* Copyright (C) 1991, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
  1495 This file is part of the GNU C Library.
  1496 
  1497 The GNU C Library is free software; you can redistribute it and/or modify
  1498 it under the terms of the GNU General Public License as published by
  1499 the Free Software Foundation; either version 2, or (at your option)
  1500 any later version.
  1501 
  1502 The GNU C Library is distributed in the hope that it will be useful,
  1503 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1504 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  1505 GNU General Public License for more details.
  1506 
  1507 You should have received a copy of the GNU General Public License
  1508 along with the GNU C Library.  If not, see <https://www.gnu.org/licenses/>.  */
  1509 
  1510 /* uClibc defines __GNU_LIBRARY__, but it is not completely
  1511    compatible.  */
  1512 #if !defined (__GNU_LIBRARY__) || defined (__UCLIBC__)
  1513 #define __sbrk  sbrk
  1514 #else /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
  1515 /* It is best not to declare this and cast its result on foreign operating
  1516    systems with potentially hostile include files.  */
  1517 
  1518 extern void *__sbrk (ptrdiff_t increment);
  1519 #endif /* __GNU_LIBRARY__ && ! defined (__UCLIBC__) */
  1520 
  1521 /* Allocate INCREMENT more bytes of data space,
  1522    and return the start of data space, or NULL on errors.
  1523    If INCREMENT is negative, shrink data space.  */
  1524 void *
  1525 __default_morecore (ptrdiff_t increment)
  1526 {
  1527   void *result;
  1528 #if defined (CYGWIN)
  1529   if (!DUMPED)
  1530     {
  1531       return bss_sbrk (increment);
  1532     }
  1533 #endif
  1534   result = (void *) __sbrk (increment);
  1535   if (result == (void *) -1)
  1536     return NULL;
  1537   return result;
  1538 }
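
       /* Illustrative call sequence (not from this file); shrinking back
          is only safe if nothing else has moved the break in between:

            void *base = __default_morecore (16 * 1024);  // grow by 16 KiB
            if (base != NULL)
              __default_morecore (-16 * 1024);            // give it back
       */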
  1539 /* Copyright (C) 1991, 92, 93, 94, 95, 96 Free Software Foundation, Inc.
  1540 
  1541 This library is free software; you can redistribute it and/or
  1542 modify it under the terms of the GNU General Public License as
  1543 published by the Free Software Foundation; either version 2 of the
  1544 License, or (at your option) any later version.
  1545 
  1546 This library is distributed in the hope that it will be useful,
  1547 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1548 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1549 General Public License for more details.
  1550 
  1551 You should have received a copy of the GNU General Public
  1552 License along with this library.  If not, see <https://www.gnu.org/licenses/>.  */
  1553 
  1554 void *(*__memalign_hook) (size_t size, size_t alignment);
  1555 
  1556 void *
  1557 aligned_alloc (size_t alignment, size_t size)
  1558 {
  1559   void *result;
  1560   size_t adj, lastadj;
  1561   void *(*hook) (size_t, size_t) = __memalign_hook;
  1562 
  1563   if (hook)
  1564     return (*hook) (alignment, size);
  1565 
  1566   /* Allocate a block with enough extra space to pad the block with up to
  1567      (ALIGNMENT - 1) bytes if necessary.  The unsigned comparison below
            doubles as an overflow guard: on a size_t, -SIZE is
            SIZE_MAX - SIZE + 1, so the test fires whenever
            SIZE + ALIGNMENT - 1 would wrap around (and also for SIZE == 0,
            which is likewise rejected).  */
  1568   if (- size < alignment)
  1569     {
  1570       errno = ENOMEM;
  1571       return NULL;
  1572     }
  1573   result = malloc (size + alignment - 1);
  1574   if (result == NULL)
  1575     return NULL;
  1576 
  1577   /* Figure out how much we will need to pad this particular block
  1578      to achieve the required alignment.  */
  1579   adj = alignment - (uintptr_t) result % alignment;
  1580   if (adj == alignment)
  1581     adj = 0;
  1582 
  1583   if (adj != alignment - 1)
  1584     {
  1585       do
  1586         {
  1587           /* Reallocate the block with only as much excess as it
  1588              needs.  */
  1589           free (result);
  1590           result = malloc (size + adj);
  1591           if (result == NULL)   /* Impossible unless interrupted.  */
  1592             return NULL;
  1593 
  1594           lastadj = adj;
  1595           adj = alignment - (uintptr_t) result % alignment;
  1596           if (adj == alignment)
  1597             adj = 0;
  1598           /* It's conceivable we might have been so unlucky as to get
  1599              a different block with weaker alignment.  If so, this
  1600              block is too short to contain SIZE after alignment
  1601              correction.  So we must try again and get another block,
  1602              slightly larger.  */
  1603         } while (adj > lastadj);
  1604     }
  1605 
  1606   if (adj != 0)
  1607     {
  1608       /* Record this block in the list of aligned blocks, so that `free'
  1609          can identify the pointer it is passed, which will be in the middle
  1610          of an allocated block.  */
  1611 
  1612       struct alignlist *l;
  1613       LOCK_ALIGNED_BLOCKS ();
  1614       for (l = _aligned_blocks; l != NULL; l = l->next)
  1615         if (l->aligned == NULL)
  1616           /* This slot is free.  Use it.  */
  1617           break;
  1618       if (l == NULL)
  1619         {
  1620           l = malloc (sizeof *l);
  1621           if (l != NULL)
  1622             {
  1623               l->next = _aligned_blocks;
  1624               _aligned_blocks = l;
  1625             }
  1626         }
  1627       if (l != NULL)
  1628         {
  1629           l->exact = result;
  1630           result = l->aligned = (char *) result + adj;
  1631         }
  1632       UNLOCK_ALIGNED_BLOCKS ();
  1633       if (l == NULL)
  1634         {
  1635           free (result);
  1636           result = NULL;
  1637         }
  1638     }
  1639 
  1640   return result;
  1641 }
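
       /* Minimal usage sketch (hypothetical caller).  The returned pointer
          may sit up to ALIGNMENT - 1 bytes past the start of the underlying
          malloc'd block, which is why _aligned_blocks has to remember the
          exact address on behalf of free:

            double *v = aligned_alloc (64, 100 * sizeof *v);
            if (v != NULL)
              {
                // here (uintptr_t) v % 64 == 0
                free (v);  // free recovers l->exact via _aligned_blocks
              }
       */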
  1642 
  1643 /* An obsolete alias for aligned_alloc, kept for any old libraries
  1644    that still call it by the memalign name.  */
  1645 
  1646 void *
  1647 memalign (size_t alignment, size_t size)
  1648 {
  1649   return aligned_alloc (alignment, size);
  1650 }
  1651 
  1652 /* If HYBRID_MALLOC is defined, we may want to use the system
  1653    posix_memalign below.  */
  1654 #ifndef HYBRID_MALLOC
  1655 int
  1656 posix_memalign (void **memptr, size_t alignment, size_t size)
  1657 {
  1658   void *mem;
  1659 
  1660   if (alignment == 0
  1661       || alignment % sizeof (void *) != 0
  1662       || (alignment & (alignment - 1)) != 0)
  1663     return EINVAL;
  1664 
  1665   mem = aligned_alloc (alignment, size);
  1666   if (mem == NULL)
  1667     return ENOMEM;
  1668 
  1669   *memptr = mem;
  1670 
  1671   return 0;
  1672 }
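
       /* Usage sketch (hypothetical caller): unlike aligned_alloc, failure
          comes back as a result code and errno is left untouched:

            void *buf;
            int err = posix_memalign (&buf, 4096, 8192);
            if (err == 0)
              free (buf);  // success: buf is 4096-byte aligned
            // err == EINVAL: alignment not a power-of-two multiple of
            // sizeof (void *); err == ENOMEM: the allocation failed
       */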
  1673 #endif
  1674 
  1675 /* Allocate memory on a page boundary.
  1676    Copyright (C) 1991, 92, 93, 94, 96 Free Software Foundation, Inc.
  1677 
  1678 This library is free software; you can redistribute it and/or
  1679 modify it under the terms of the GNU General Public License as
  1680 published by the Free Software Foundation; either version 2 of the
  1681 License, or (at your option) any later version.
  1682 
  1683 This library is distributed in the hope that it will be useful,
  1684 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1685 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1686 General Public License for more details.
  1687 
  1688 You should have received a copy of the GNU General Public
  1689 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1690 
  1691    The author may be reached (Email) at the address mike@ai.mit.edu,
  1692    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1693 
  1694 /* Allocate SIZE bytes on a page boundary.  */
  1695 extern void *valloc (size_t);
  1696 
  1697 #if defined _SC_PAGESIZE || !defined HAVE_GETPAGESIZE
  1698 # include "getpagesize.h"
  1699 #elif !defined getpagesize
  1700 extern int getpagesize (void);
  1701 #endif
  1702 
  1703 static size_t pagesize;
  1704 
  1705 void *
  1706 valloc (size_t size)
  1707 {
  1708   if (pagesize == 0)
  1709     pagesize = getpagesize ();
  1710 
  1711   return aligned_alloc (pagesize, size);
  1712 }
  1713 
  1714 #ifdef HYBRID_MALLOC
  1715 #undef malloc
  1716 #undef realloc
  1717 #undef calloc
  1718 #undef aligned_alloc
  1719 #undef free
  1720 
  1721 /* Declare system malloc and friends.  */
  1722 extern void *malloc (size_t size);
  1723 extern void *realloc (void *ptr, size_t size);
  1724 extern void *calloc (size_t nmemb, size_t size);
  1725 extern void free (void *ptr);
  1726 #ifdef HAVE_ALIGNED_ALLOC
  1727 extern void *aligned_alloc (size_t alignment, size_t size);
  1728 #elif defined HAVE_POSIX_MEMALIGN
  1729 extern int posix_memalign (void **memptr, size_t alignment, size_t size);
  1730 #endif
  1731 
  1732 /* See the comments near the beginning of this file for explanations
  1733    of the following functions. */
  1734 
  1735 void *
  1736 hybrid_malloc (size_t size)
  1737 {
  1738   if (DUMPED)
  1739     return malloc (size);
  1740   return gmalloc (size);
  1741 }
  1742 
  1743 void *
  1744 hybrid_calloc (size_t nmemb, size_t size)
  1745 {
  1746   if (DUMPED)
  1747     return calloc (nmemb, size);
  1748   return gcalloc (nmemb, size);
  1749 }
  1750 
  1751 void
  1752 hybrid_free (void *ptr)
  1753 {
  1754   if (!DUMPED)
  1755     gfree (ptr);
  1756   else if (!ALLOCATED_BEFORE_DUMPING (ptr))
  1757     free (ptr);
  1758   /* Otherwise the dumped emacs is trying to free something allocated
  1759      before dumping; do nothing.  */
  1760   return;
  1761 }
  1762 
  1763 #if defined HAVE_ALIGNED_ALLOC || defined HAVE_POSIX_MEMALIGN
  1764 void *
  1765 hybrid_aligned_alloc (size_t alignment, size_t size)
  1766 {
  1767   if (!DUMPED)
  1768     return galigned_alloc (alignment, size);
  1769   /* The following is copied from alloc.c.  */
  1770 #ifdef HAVE_ALIGNED_ALLOC
  1771   return aligned_alloc (alignment, size);
  1772 #else  /* HAVE_POSIX_MEMALIGN */
  1773   void *p;
  1774   return posix_memalign (&p, alignment, size) == 0 ? p : 0;
  1775 #endif
  1776 }
  1777 #endif
  1778 
  1779 void *
  1780 hybrid_realloc (void *ptr, size_t size)
  1781 {
  1782   void *result;
  1783   int type;
  1784   size_t block, oldsize;
  1785 
  1786   if (!DUMPED)
  1787     return grealloc (ptr, size);
  1788   if (!ALLOCATED_BEFORE_DUMPING (ptr))
  1789     return realloc (ptr, size);
  1790 
  1791   /* The dumped emacs is trying to realloc storage allocated before
  1792      dumping.  We just malloc new space and copy the data.  */
  1793   if (size == 0 || ptr == NULL)
  1794     return malloc (size);
  1795   block = ((char *) ptr - _heapbase) / BLOCKSIZE + 1;
  1796   type = _heapinfo[block].busy.type;
  1797   oldsize =
  1798     type == 0 ? _heapinfo[block].busy.info.size * BLOCKSIZE
  1799     : (size_t) 1 << type;
  1800   result = malloc (size);
  1801   if (result)
  1802     return memcpy (result, ptr, min (oldsize, size));
  1803   return result;
  1804 }
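
       /* Worked example of the oldsize recovery above (hypothetical values,
          assuming BLOCKSIZE is 4096).  For a pre-dump large object,
          type == 0 and oldsize is the block count times BLOCKSIZE.  For a
          pre-dump fragment, type is the base-2 logarithm of the fragment
          size, so type == 5 means oldsize == 32, and min (32, size) bytes
          are copied into the freshly malloc'd block.  */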
  1805 
  1806 #ifdef HYBRID_GET_CURRENT_DIR_NAME
  1807 /* Defined in sysdep.c.  */
  1808 char *gget_current_dir_name (void);
  1809 
  1810 char *
  1811 hybrid_get_current_dir_name (void)
  1812 {
  1813   if (DUMPED)
  1814     return get_current_dir_name ();
  1815   return gget_current_dir_name ();
  1816 }
  1817 #endif
  1818 
  1819 #endif  /* HYBRID_MALLOC */
  1820 
  1821 #ifdef GC_MCHECK
  1822 
  1823 /* Standard debugging hooks for `malloc'.
  1824    Copyright 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc.
  1825    Written May 1989 by Mike Haertel.
  1826 
  1827 This library is free software; you can redistribute it and/or
  1828 modify it under the terms of the GNU General Public License as
  1829 published by the Free Software Foundation; either version 2 of the
  1830 License, or (at your option) any later version.
  1831 
  1832 This library is distributed in the hope that it will be useful,
  1833 but WITHOUT ANY WARRANTY; without even the implied warranty of
  1834 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  1835 General Public License for more details.
  1836 
  1837 You should have received a copy of the GNU General Public
  1838 License along with this library.  If not, see <https://www.gnu.org/licenses/>.
  1839 
  1840    The author may be reached (Email) at the address mike@ai.mit.edu,
  1841    or (US mail) as Mike Haertel c/o Free Software Foundation.  */
  1842 
  1843 #include <stdio.h>
  1844 
  1845 /* Old hook values.  */
  1846 static void (*old_free_hook) (void *ptr);
  1847 static void *(*old_malloc_hook) (size_t size);
  1848 static void *(*old_realloc_hook) (void *ptr, size_t size);
  1849 
  1850 /* Function to call when something awful happens.  */
  1851 static void (*abortfunc) (enum mcheck_status);
  1852 
  1853 /* Arbitrary magical numbers.  */
  1854 #define MAGICWORD       (SIZE_MAX / 11 ^ SIZE_MAX / 13 << 3)
  1855 #define MAGICFREE       (SIZE_MAX / 17 ^ SIZE_MAX / 19 << 4)
  1856 #define MAGICBYTE       ((char) 0xd7)
  1857 #define MALLOCFLOOD     ((char) 0x93)
  1858 #define FREEFLOOD       ((char) 0x95)
  1859 
  1860 struct hdr
  1861   {
  1862     size_t size;        /* Exact size requested by user.  */
  1863     size_t magic;       /* Magic number to check header integrity.  */
  1864   };
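
       /* Layout of a checked block (sketch).  mallochook returns the
          address just past the header, and a single guard byte follows the
          user's data:

            | size | magic | user data (size bytes) | MAGICBYTE |
            ^ struct hdr   ^ pointer handed to the user

          checkhdr inspects the magic word, and the guard byte catches
          one-byte overruns past the end of the block.  */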
  1865 
  1866 static enum mcheck_status
  1867 checkhdr (const struct hdr *hdr)
  1868 {
  1869   enum mcheck_status status;
  1870   switch (hdr->magic)
  1871     {
  1872     default:
  1873       status = MCHECK_HEAD;
  1874       break;
  1875     case MAGICFREE:
  1876       status = MCHECK_FREE;
  1877       break;
  1878     case MAGICWORD:
  1879       if (((char *) &hdr[1])[hdr->size] != MAGICBYTE)
  1880         status = MCHECK_TAIL;
  1881       else
  1882         status = MCHECK_OK;
  1883       break;
  1884     }
  1885   if (status != MCHECK_OK)
  1886     (*abortfunc) (status);
  1887   return status;
  1888 }
  1889 
  1890 static void
  1891 freehook (void *ptr)
  1892 {
  1893   struct hdr *hdr;
  1894 
  1895   if (ptr)
  1896     {
  1897       struct alignlist *l;
  1898 
  1899       /* If the block was allocated by aligned_alloc, its real pointer
  1900          to free is recorded in _aligned_blocks; find that.  */
  1901       PROTECT_MALLOC_STATE (0);
  1902       LOCK_ALIGNED_BLOCKS ();
  1903       for (l = _aligned_blocks; l != NULL; l = l->next)
  1904         if (l->aligned == ptr)
  1905           {
  1906             l->aligned = NULL;  /* Mark the slot in the list as free.  */
  1907             ptr = l->exact;
  1908             break;
  1909           }
  1910       UNLOCK_ALIGNED_BLOCKS ();
  1911       PROTECT_MALLOC_STATE (1);
  1912 
  1913       hdr = ((struct hdr *) ptr) - 1;
  1914       checkhdr (hdr);
  1915       hdr->magic = MAGICFREE;
  1916       memset (ptr, FREEFLOOD, hdr->size);
  1917     }
  1918   else
  1919     hdr = NULL;
  1920 
  1921   __free_hook = old_free_hook;
  1922   free (hdr);
  1923   __free_hook = freehook;
  1924 }
  1925 
  1926 static void *
  1927 mallochook (size_t size)
  1928 {
  1929   struct hdr *hdr;
  1930 
  1931   __malloc_hook = old_malloc_hook;
  1932   hdr = malloc (sizeof *hdr + size + 1);
  1933   __malloc_hook = mallochook;
  1934   if (hdr == NULL)
  1935     return NULL;
  1936 
  1937   hdr->size = size;
  1938   hdr->magic = MAGICWORD;
  1939   ((char *) &hdr[1])[size] = MAGICBYTE;
  1940   return memset (hdr + 1, MALLOCFLOOD, size);
  1941 }
  1942 
  1943 static void *
  1944 reallochook (void *ptr, size_t size)
  1945 {
  1946   struct hdr *hdr = NULL;
  1947   size_t osize = 0;
  1948 
  1949   if (ptr)
  1950     {
  1951       hdr = ((struct hdr *) ptr) - 1;
  1952       osize = hdr->size;
  1953 
  1954       checkhdr (hdr);
  1955       if (size < osize)
  1956         memset ((char *) ptr + size, FREEFLOOD, osize - size);
  1957     }
  1958 
  1959   __free_hook = old_free_hook;
  1960   __malloc_hook = old_malloc_hook;
  1961   __realloc_hook = old_realloc_hook;
  1962   hdr = realloc (hdr, sizeof *hdr + size + 1);
  1963   __free_hook = freehook;
  1964   __malloc_hook = mallochook;
  1965   __realloc_hook = reallochook;
  1966   if (hdr == NULL)
  1967     return NULL;
  1968 
  1969   hdr->size = size;
  1970   hdr->magic = MAGICWORD;
  1971   ((char *) &hdr[1])[size] = MAGICBYTE;
  1972   if (size > osize)
  1973     memset ((char *) (hdr + 1) + osize, MALLOCFLOOD, size - osize);
  1974   return hdr + 1;
  1975 }
  1976 
  1977 static void
  1978 mabort (enum mcheck_status status)
  1979 {
  1980   const char *msg;
  1981   switch (status)
  1982     {
  1983     case MCHECK_OK:
  1984       msg = "memory is consistent, library is buggy";
  1985       break;
  1986     case MCHECK_HEAD:
  1987       msg = "memory clobbered before allocated block";
  1988       break;
  1989     case MCHECK_TAIL:
  1990       msg = "memory clobbered past end of allocated block";
  1991       break;
  1992     case MCHECK_FREE:
  1993       msg = "block freed twice";
  1994       break;
  1995     default:
  1996       msg = "bogus mcheck_status, library is buggy";
  1997       break;
  1998     }
  1999 #ifdef __GNU_LIBRARY__
  2000   __libc_fatal (msg);
  2001 #else
  2002   fprintf (stderr, "mcheck: %s\n", msg);
  2003   fflush (stderr);
  2004 # ifdef emacs
  2005   emacs_abort ();
  2006 # else
  2007   abort ();
  2008 # endif
  2009 #endif
  2010 }
  2011 
  2012 static int mcheck_used = 0;
  2013 
  2014 int
  2015 mcheck (void (*func) (enum mcheck_status))
  2016 {
  2017   abortfunc = (func != NULL) ? func : &mabort;
  2018 
  2019   /* These hooks may not be safely inserted if malloc is already in use.  */
  2020   if (!__malloc_initialized && !mcheck_used)
  2021     {
  2022       old_free_hook = __free_hook;
  2023       __free_hook = freehook;
  2024       old_malloc_hook = __malloc_hook;
  2025       __malloc_hook = mallochook;
  2026       old_realloc_hook = __realloc_hook;
  2027       __realloc_hook = reallochook;
  2028       mcheck_used = 1;
  2029     }
  2030 
  2031   return mcheck_used ? 0 : -1;
  2032 }
  2033 
  2034 enum mcheck_status
  2035 mprobe (void *ptr)
  2036 {
          /* PTR is a user pointer, so step back to its header first,
             as freehook does.  */
   2037   return mcheck_used ? checkhdr (((struct hdr *) ptr) - 1) : MCHECK_DISABLED;
  2038 }
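
       /* Usage sketch (hypothetical caller): mcheck must be called before
          the first malloc, and mprobe takes a pointer malloc returned:

            if (mcheck (NULL) == 0)   // install hooks; abort via mabort
              {
                char *p = malloc (32);
                if (p != NULL && mprobe (p) == MCHECK_OK)
                  free (p);
              }
       */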
  2039 
  2040 #endif /* GC_MCHECK */
