src/ralloc.c


DEFINITIONS

This source file includes the following definitions.
  1. find_heap
  2. obtain
  3. relinquish
  4. find_bloc
  5. get_bloc
  6. relocate_blocs
  7. update_heap_bloc_correspondence
  8. resize_bloc
  9. free_bloc
  10. r_alloc_sbrk
  11. r_alloc
  12. r_alloc_free
  13. r_re_alloc
  14. r_alloc_reinit
  15. r_alloc_check
  16. r_alloc_reset_variable
  17. r_alloc_inhibit_buffer_relocation
  18. r_alloc_init

/* Block-relocating memory allocator.
   Copyright (C) 1993, 1995, 2000-2023 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */

/* NOTES:

   Only relocate the blocs necessary for SIZE in r_alloc_sbrk,
   rather than all of them.  This means allowing for a possible
   hole between the first bloc and the end of malloc storage.  */

#include <config.h>

#include <stddef.h>

#include "lisp.h"
#include "blockinput.h"
#include <unistd.h>

#include "getpagesize.h"

/* A flag to indicate whether we have initialized ralloc yet.  For
   Emacs's sake, please do not make this local to malloc_init; on some
   machines, the dumping procedure makes all static variables
   read-only.  On these machines, the word static is #defined to be
   the empty string, meaning that r_alloc_initialized becomes an
   automatic variable, and loses its value each time Emacs is started
   up.  */

static int r_alloc_initialized = 0;

static void r_alloc_init (void);


/* Declarations for working with the malloc, ralloc, and system breaks.  */

/* Function to set the real break value.  */
void *(*real_morecore) (ptrdiff_t);

/* The break value, as seen by malloc.  */
static void *virtual_break_value;

/* The address of the end of the last data in use by ralloc,
   including relocatable blocs as well as malloc data.  */
static void *break_value;

/* This is the size of a page.  We round memory requests to this boundary.  */
static int page_size;

/* Whenever we get memory from the system, get this many extra bytes.  This
   must be a multiple of page_size.  */
static int extra_bytes;

/* Macros for rounding.  Note that rounding to any value is possible
   by changing the definition of PAGE.  */
#define PAGE (getpagesize ())
#define PAGE_ROUNDUP(size) (((size_t) (size) + page_size - 1) \
                            & ~((size_t) (page_size - 1)))

#define MEM_ALIGN sizeof (double)
#define MEM_ROUNDUP(addr) (((size_t) (addr) + MEM_ALIGN - 1) \
                           & ~(MEM_ALIGN - 1))
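
/* A worked example of the rounding arithmetic (added for illustration;
   the concrete numbers assume page_size == 4096 and MEM_ALIGN == 8):

     PAGE_ROUNDUP (5000)  =>  (5000 + 4095) & ~4095  =>  8192
     PAGE_ROUNDUP (8192)  =>  (8192 + 4095) & ~4095  =>  8192
     MEM_ROUNDUP (13)     =>  (13 + 7) & ~7          =>  16

   Both macros assume the rounding target is a power of two, so that
   the mask ~(target - 1) clears exactly the low-order bits.  */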

/* The hook `malloc' uses for the function which gets more space
   from the system.  */

#ifdef HAVE_MALLOC_H
# include <malloc.h>
#endif
#ifndef DOUG_LEA_MALLOC
extern void *(*__morecore) (ptrdiff_t);
#endif



/***********************************************************************
                      Implementation using sbrk
 ***********************************************************************/

/* Data structures of heaps and blocs.  */

/* The relocatable objects, or blocs, and the malloc data
   both reside within one or more heaps.
   Each heap contains malloc data, running from `start' to `bloc_start',
   and relocatable objects, running from `bloc_start' to `free'.

   Relocatable objects may relocate within the same heap
   or may move into another heap; the heaps themselves may grow
   but they never move.

   We try to make just one heap and make it larger as necessary.
   But sometimes we can't do that, because we can't get contiguous
   space to add onto the heap.  When that happens, we start a new heap.  */
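
/* A sketch of a single heap's layout (added for illustration, derived
   from the description above):

     start          bloc_start            free              end
       |  malloc data  |  relocatable blocs  |  unused space  |

   `start'..`bloc_start' holds ordinary malloc data, `bloc_start'..`free'
   holds the relocatable blocs, and `free'..`end' has been obtained from
   the system but not yet handed out.  */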

typedef struct heap
{
  struct heap *next;
  struct heap *prev;
  /* Start of memory range of this heap.  */
  void *start;
  /* End of memory range of this heap.  */
  void *end;
  /* Start of relocatable data in this heap.  */
  void *bloc_start;
  /* Start of unused space in this heap.  */
  void *free;
  /* First bloc in this heap.  */
  struct bp *first_bloc;
  /* Last bloc in this heap.  */
  struct bp *last_bloc;
} *heap_ptr;

#define NIL_HEAP ((heap_ptr) 0)

/* This is the first heap object.
   If we need additional heap objects, each one resides at the beginning of
   the space it covers.  */
static struct heap heap_base;

/* Head and tail of the list of heaps.  */
static heap_ptr first_heap, last_heap;

/* These structures are allocated in the malloc arena.
   The linked list is kept in order of increasing '.data' members.
   The data blocks abut each other; if b->next is non-nil, then
   b->data + b->size == b->next->data.

   An element with variable==NULL denotes a freed block, which has not yet
   been collected.  They may only appear while r_alloc_freeze_level > 0,
   and will be freed when the arena is thawed.  Currently, these blocs are
   not reusable, while the arena is frozen.  Very inefficient.  */

typedef struct bp
{
  struct bp *next;
  struct bp *prev;
  void **variable;
  void *data;
  size_t size;
  void *new_data;               /* temporarily used for relocation */
  struct heap *heap;            /* Heap this bloc is in.  */
} *bloc_ptr;

#define NIL_BLOC ((bloc_ptr) 0)
#define BLOC_PTR_SIZE (sizeof (struct bp))

/* Head and tail of the list of relocatable blocs.  */
static bloc_ptr first_bloc, last_bloc;
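
/* The abutment invariant described above could be verified with a loop
   such as this (an illustrative sketch only; check_bloc_abutment is a
   hypothetical name, not a function of this file):

     static void
     check_bloc_abutment (void)
     {
       bloc_ptr b;

       for (b = first_bloc; b && b->next; b = b->next)
         if ((char *) b->data + b->size != (char *) b->next->data)
           emacs_abort ();
     }
*/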

static int use_relocatable_buffers;

/* If >0, no relocation whatsoever takes place.  */
static int r_alloc_freeze_level;


/* Functions to get and return memory from the system.  */

/* Find the heap that ADDRESS falls within.  */

static heap_ptr
find_heap (void *address)
{
  heap_ptr heap;

  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        return heap;
    }

  return NIL_HEAP;
}

/* Find SIZE bytes of space in a heap.
   Try to get them at ADDRESS (which must fall within some heap's range)
   if we can get that many within one heap.

   If enough space is not presently available in our reserve, this means
   getting more page-aligned space from the system.  If the returned space
   is not contiguous to the last heap, allocate a new heap, and append it
   to the heap list.

   obtain does not try to keep track of whether space is in use or not.
   It just returns the address of SIZE bytes that fall within a
   single heap.  If you call obtain twice in a row with the same arguments,
   you typically get the same value.  It's the caller's responsibility to
   keep track of what space is in use.

   Return the address of the space if all went well, or zero if we couldn't
   allocate the memory.  */

static void *
obtain (void *address, size_t size)
{
  heap_ptr heap;
  size_t already_available;

  /* Find the heap that ADDRESS falls within.  */
  for (heap = last_heap; heap; heap = heap->prev)
    {
      if (heap->start <= address && address <= heap->end)
        break;
    }

  if (! heap)
    emacs_abort ();

  /* If we can't fit SIZE bytes in that heap,
     try successive later heaps.  */
  while (heap && (char *) address + size > (char *) heap->end)
    {
      heap = heap->next;
      if (heap == NIL_HEAP)
        break;
      address = heap->bloc_start;
    }

  /* If we can't fit them within any existing heap,
     get more space.  */
  if (heap == NIL_HEAP)
    {
      void *new = real_morecore (0);
      size_t get;

      already_available = (char *) last_heap->end - (char *) address;

      if (new != last_heap->end)
        {
          /* Someone else called sbrk.  Make a new heap.  */

          heap_ptr new_heap = (heap_ptr) MEM_ROUNDUP (new);
          void *bloc_start = (void *) MEM_ROUNDUP ((void *) (new_heap + 1));

          if (real_morecore ((char *) bloc_start - (char *) new) != new)
            return 0;

          new_heap->start = new;
          new_heap->end = bloc_start;
          new_heap->bloc_start = bloc_start;
          new_heap->free = bloc_start;
          new_heap->next = NIL_HEAP;
          new_heap->prev = last_heap;
          new_heap->first_bloc = NIL_BLOC;
          new_heap->last_bloc = NIL_BLOC;
          last_heap->next = new_heap;
          last_heap = new_heap;

          address = bloc_start;
          already_available = 0;
        }

      /* Add space to the last heap (which we may have just created).
         Get some extra, so we can come here less often.  */

      get = size + extra_bytes - already_available;
      get = (char *) PAGE_ROUNDUP ((char *) last_heap->end + get)
        - (char *) last_heap->end;

      if (real_morecore (get) != last_heap->end)
        return 0;

      last_heap->end = (char *) last_heap->end + get;
    }

  return address;
}

/* Return unused heap space to the system
   if there is a lot of unused space now.
   This can make the last heap smaller;
   it can also eliminate the last heap entirely.  */

static void
relinquish (void)
{
  register heap_ptr h;
  ptrdiff_t excess = 0;
  /* Add the amount of space beyond break_value
     in all heaps which extend beyond break_value at all.  */

  for (h = last_heap; h && break_value < h->end; h = h->prev)
    {
      excess += (char *) h->end - (char *) ((break_value < h->bloc_start)
                                            ? h->bloc_start : break_value);
    }

  if (excess > extra_bytes * 2 && real_morecore (0) == last_heap->end)
    {
      /* Keep extra_bytes worth of empty space.
         And don't free anything unless we can free at least extra_bytes.  */
      excess -= extra_bytes;

      if ((char *) last_heap->end - (char *) last_heap->bloc_start <= excess)
        {
          heap_ptr lh_prev;

          /* This heap should have no blocs in it.  If it does, we
             cannot return it to the system.  */
          if (last_heap->first_bloc != NIL_BLOC
              || last_heap->last_bloc != NIL_BLOC)
            return;

          /* Return the last heap, with its header, to the system.  */
          excess = (char *) last_heap->end - (char *) last_heap->start;
          lh_prev = last_heap->prev;
          /* If the system doesn't want that much memory back, leave
             last_heap unaltered to reflect that.  This can occur if
             break_value is still within the original data segment.  */
          if (real_morecore (- excess) != 0)
            {
              last_heap = lh_prev;
              last_heap->next = NIL_HEAP;
            }
        }
      else
        {
          excess = ((char *) last_heap->end
                    - (char *) PAGE_ROUNDUP ((char *) last_heap->end - excess));
          /* If the system doesn't want that much memory back, leave
             the end of the last heap unchanged to reflect that.  This
             can occur if break_value is still within the original
             data segment.  */
          if (real_morecore (- excess) != 0)
            last_heap->end = (char *) last_heap->end - excess;
        }
    }
}

/* The meat - allocating, freeing, and relocating blocs.  */

/* Find the bloc referenced by the address in PTR.  Return a pointer
   to that bloc, or NIL_BLOC if there is none.  */

static bloc_ptr
find_bloc (void **ptr)
{
  bloc_ptr p = first_bloc;

  while (p != NIL_BLOC)
    {
      /* Consistency check.  Don't return inconsistent blocs.
         Don't abort here, as callers might be expecting this, but
         callers that always expect a bloc to be returned should abort
         if one isn't, to avoid a memory corruption bug that is
         difficult to track down.  */
      if (p->variable == ptr && p->data == *ptr)
        return p;

      p = p->next;
    }

  return p;
}

/* Allocate a bloc of SIZE bytes and append it to the chain of blocs.
   Return a pointer to the new bloc, or zero if we couldn't allocate
   memory for the new bloc.  */

static bloc_ptr
get_bloc (size_t size)
{
  bloc_ptr new_bloc;
  heap_ptr heap;

  if (! (new_bloc = malloc (BLOC_PTR_SIZE))
      || ! (new_bloc->data = obtain (break_value, size)))
    {
      free (new_bloc);

      return 0;
    }

  break_value = (char *) new_bloc->data + size;

  new_bloc->size = size;
  new_bloc->next = NIL_BLOC;
  new_bloc->variable = NULL;
  new_bloc->new_data = 0;

  /* Record in the heap that this space is in use.  */
  heap = find_heap (new_bloc->data);
  heap->free = break_value;

  /* Maintain the correspondence between heaps and blocs.  */
  new_bloc->heap = heap;
  heap->last_bloc = new_bloc;
  if (heap->first_bloc == NIL_BLOC)
    heap->first_bloc = new_bloc;

  /* Put this bloc on the doubly-linked list of blocs.  */
  if (first_bloc)
    {
      new_bloc->prev = last_bloc;
      last_bloc->next = new_bloc;
      last_bloc = new_bloc;
    }
  else
    {
      first_bloc = last_bloc = new_bloc;
      new_bloc->prev = NIL_BLOC;
    }

  return new_bloc;
}

/* Calculate new locations of blocs in the list beginning with BLOC,
   relocating it to start at ADDRESS, in heap HEAP.  If enough space is
   not presently available in our reserve, call obtain for
   more space.

   Store the new location of each bloc in its new_data field.
   Do not touch the contents of blocs or break_value.  */

static int
relocate_blocs (bloc_ptr bloc, heap_ptr heap, void *address)
{
  bloc_ptr b = bloc;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  while (b)
    {
      /* If bloc B won't fit within HEAP,
         move to the next heap and try again.  */
      while (heap && (char *) address + b->size > (char *) heap->end)
        {
          heap = heap->next;
          if (heap == NIL_HEAP)
            break;
          address = heap->bloc_start;
        }

      /* If BLOC won't fit in any heap,
         get enough new space to hold BLOC and all following blocs.  */
      if (heap == NIL_HEAP)
        {
          bloc_ptr tb = b;
          size_t s = 0;

          /* Add up the size of all the following blocs.  */
          while (tb != NIL_BLOC)
            {
              if (tb->variable)
                s += tb->size;

              tb = tb->next;
            }

          /* Get that space.  */
          address = obtain (address, s);
          if (address == 0)
            return 0;

          heap = last_heap;
        }

      /* Record the new address of this bloc
         and update where the next bloc can start.  */
      b->new_data = address;
      if (b->variable)
        address = (char *) address + b->size;
      b = b->next;
    }

  return 1;
}

/* Update the records of which heaps contain which blocs, starting
   with heap HEAP and bloc BLOC.  */

static void
update_heap_bloc_correspondence (bloc_ptr bloc, heap_ptr heap)
{
  register bloc_ptr b;

  /* Initialize HEAP's status to reflect blocs before BLOC.  */
  if (bloc != NIL_BLOC && bloc->prev != NIL_BLOC && bloc->prev->heap == heap)
    {
      /* The previous bloc is in HEAP.  */
      heap->last_bloc = bloc->prev;
      heap->free = (char *) bloc->prev->data + bloc->prev->size;
    }
  else
    {
      /* HEAP contains no blocs before BLOC.  */
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
    }

  /* Advance through blocs one by one.  */
  for (b = bloc; b != NIL_BLOC; b = b->next)
    {
      /* Advance through heaps, marking them empty,
         till we get to the one that B is in.  */
      while (heap)
        {
          if (heap->bloc_start <= b->data && b->data <= heap->end)
            break;
          heap = heap->next;
          /* We know HEAP is not null now,
             because there has to be space for bloc B.  */
          heap->first_bloc = NIL_BLOC;
          heap->last_bloc = NIL_BLOC;
          heap->free = heap->bloc_start;
        }

      /* Update HEAP's status for bloc B.  */
      heap->free = (char *) b->data + b->size;
      heap->last_bloc = b;
      if (heap->first_bloc == NIL_BLOC)
        heap->first_bloc = b;

      /* Record that B is in HEAP.  */
      b->heap = heap;
    }

  /* If there are any remaining heaps and no blocs left,
     mark those heaps as empty.  */
  heap = heap->next;
  while (heap)
    {
      heap->first_bloc = NIL_BLOC;
      heap->last_bloc = NIL_BLOC;
      heap->free = heap->bloc_start;
      heap = heap->next;
    }
}

/* Resize BLOC to SIZE bytes.  This relocates the blocs
   that come after BLOC in memory.  */

static int
resize_bloc (bloc_ptr bloc, size_t size)
{
  bloc_ptr b;
  heap_ptr heap;
  void *address;
  size_t old_size;

  /* No need to ever call this if arena is frozen, bug somewhere!  */
  if (r_alloc_freeze_level)
    emacs_abort ();

  if (bloc == NIL_BLOC || size == bloc->size)
    return 1;

  for (heap = first_heap; heap != NIL_HEAP; heap = heap->next)
    {
      if (heap->bloc_start <= bloc->data && bloc->data <= heap->end)
        break;
    }

  if (heap == NIL_HEAP)
    emacs_abort ();

  old_size = bloc->size;
  bloc->size = size;

  /* Note that bloc could be moved into the previous heap.  */
  address = (bloc->prev ? (char *) bloc->prev->data + bloc->prev->size
             : (char *) first_heap->bloc_start);
  while (heap)
    {
      if (heap->bloc_start <= address && address <= heap->end)
        break;
      heap = heap->prev;
    }

  if (! relocate_blocs (bloc, heap, address))
    {
      bloc->size = old_size;
      return 0;
    }

  if (size > old_size)
    {
      for (b = last_bloc; b != bloc; b = b->prev)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
      if (!bloc->variable)
        {
          bloc->size = 0;
          bloc->data = bloc->new_data;
        }
      else
        {
          if (bloc->new_data != bloc->data)
            memmove (bloc->new_data, bloc->data, old_size);
          memset ((char *) bloc->new_data + old_size, 0, size - old_size);
          *bloc->variable = bloc->data = bloc->new_data;
        }
    }
  else
    {
      for (b = bloc; b != NIL_BLOC; b = b->next)
        {
          if (!b->variable)
            {
              b->size = 0;
              b->data = b->new_data;
            }
          else
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }
    }

  update_heap_bloc_correspondence (bloc, heap);

  break_value = (last_bloc ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  return 1;
}

/* Free BLOC from the chain of blocs, relocating any blocs above it.
   This may return space to the system.  */

static void
free_bloc (bloc_ptr bloc)
{
  heap_ptr heap = bloc->heap;
  heap_ptr h;

  if (r_alloc_freeze_level)
    {
      bloc->variable = NULL;
      return;
    }

  resize_bloc (bloc, 0);

  if (bloc == first_bloc && bloc == last_bloc)
    {
      first_bloc = last_bloc = NIL_BLOC;
    }
  else if (bloc == last_bloc)
    {
      last_bloc = bloc->prev;
      last_bloc->next = NIL_BLOC;
    }
  else if (bloc == first_bloc)
    {
      first_bloc = bloc->next;
      first_bloc->prev = NIL_BLOC;
    }
  else
    {
      bloc->next->prev = bloc->prev;
      bloc->prev->next = bloc->next;
    }

  /* Sometimes, 'heap' obtained from bloc->heap above is not really a
     'heap' structure.  It can even be beyond the current break point,
     which will cause crashes when we dereference it below (see
     bug#12242).  Evidently, the reason is bloc allocations done while
     use_relocatable_buffers was non-positive, because additional
     memory we get then is not recorded in the heaps we manage.  If
     bloc->heap records such a "heap", we cannot (and don't need to)
     update its records.  So we validate the 'heap' value by making
     sure it is one of the heaps we manage via the heaps linked list,
     and don't touch a 'heap' that isn't found there.  This avoids
     accessing memory we know nothing about.  */
  for (h = first_heap; h != NIL_HEAP; h = h->next)
    if (heap == h)
      break;

  if (h)
    {
      /* Update the records of which blocs are in HEAP.  */
      if (heap->first_bloc == bloc)
        {
          if (bloc->next != 0 && bloc->next->heap == heap)
            heap->first_bloc = bloc->next;
          else
            heap->first_bloc = heap->last_bloc = NIL_BLOC;
        }
      if (heap->last_bloc == bloc)
        {
          if (bloc->prev != 0 && bloc->prev->heap == heap)
            heap->last_bloc = bloc->prev;
          else
            heap->first_bloc = heap->last_bloc = NIL_BLOC;
        }
    }

  relinquish ();
  free (bloc);
}

/* Interface routines.  */

/* Obtain SIZE bytes of storage from the free pool, or the system, as
   necessary.  If relocatable blocs are in use, this means relocating
   them.  This function gets plugged into the GNU malloc's __morecore
   hook.

   We provide hysteresis, never relocating by less than extra_bytes.

   If we're out of memory, we should return zero, to imitate the other
   __morecore hook values - in particular, __default_morecore in the
   GNU malloc package.  */

static void *
r_alloc_sbrk (ptrdiff_t size)
{
  bloc_ptr b;
  void *address;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (use_relocatable_buffers <= 0)
    return real_morecore (size);

  if (size == 0)
    return virtual_break_value;

  if (size > 0)
    {
      /* Allocate a page-aligned space.  GNU malloc would reclaim an
         extra space if we passed an unaligned one.  But we could
         not always find a space which is contiguous to the previous.  */
      void *new_bloc_start;
      heap_ptr h = first_heap;
      size_t get = PAGE_ROUNDUP (size);

      address = (void *) PAGE_ROUNDUP (virtual_break_value);

      /* Search the list upward for a heap which is large enough.  */
      while ((char *) h->end < (char *) MEM_ROUNDUP ((char *) address + get))
        {
          h = h->next;
          if (h == NIL_HEAP)
            break;
          address = (void *) PAGE_ROUNDUP (h->start);
        }

      /* If not found, obtain more space.  */
      if (h == NIL_HEAP)
        {
          get += extra_bytes + page_size;

          if (! obtain (address, get))
            return 0;

          if (first_heap == last_heap)
            address = (void *) PAGE_ROUNDUP (virtual_break_value);
          else
            address = (void *) PAGE_ROUNDUP (last_heap->start);
          h = last_heap;
        }

      new_bloc_start = (void *) MEM_ROUNDUP ((char *) address + get);

      if (first_heap->bloc_start < new_bloc_start)
        {
          /* This is no clean solution - no idea how to do it better.  */
          if (r_alloc_freeze_level)
            return NULL;

          /* There is a bug here: if the above obtain call succeeded, but the
             relocate_blocs call below does not succeed, we need to free
             the memory that we got with obtain.  */

          /* Move all blocs upward.  */
          if (! relocate_blocs (first_bloc, h, new_bloc_start))
            return 0;

          /* Note that (char *) (h + 1) <= (char *) new_bloc_start since
             get >= page_size, so the following does not destroy the heap
             header.  */
          for (b = last_bloc; b != NIL_BLOC; b = b->prev)
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }

          h->bloc_start = new_bloc_start;

          update_heap_bloc_correspondence (first_bloc, h);
        }
      if (h != first_heap)
        {
          /* Give up managing heaps below the one the new
             virtual_break_value points to.  */
          first_heap->prev = NIL_HEAP;
          first_heap->next = h->next;
          first_heap->start = h->start;
          first_heap->end = h->end;
          first_heap->free = h->free;
          first_heap->first_bloc = h->first_bloc;
          first_heap->last_bloc = h->last_bloc;
          first_heap->bloc_start = h->bloc_start;

          if (first_heap->next)
            first_heap->next->prev = first_heap;
          else
            last_heap = first_heap;
        }

      memset (address, 0, size);
    }
  else /* size < 0 */
    {
      size_t excess = ((char *) first_heap->bloc_start
                       - ((char *) virtual_break_value + size));

      address = virtual_break_value;

      if (r_alloc_freeze_level == 0 && excess > 2 * extra_bytes)
        {
          excess -= extra_bytes;
          first_heap->bloc_start
            = (void *) MEM_ROUNDUP ((char *) first_heap->bloc_start - excess);

          relocate_blocs (first_bloc, first_heap, first_heap->bloc_start);

          for (b = first_bloc; b != NIL_BLOC; b = b->next)
            {
              if (b->new_data != b->data)
                memmove (b->new_data, b->data, b->size);
              *b->variable = b->data = b->new_data;
            }
        }

      if ((char *) virtual_break_value + size < (char *) first_heap->start)
        {
          /* We found additional space below the first heap.  */
          first_heap->start = (void *) ((char *) virtual_break_value + size);
        }
    }

  virtual_break_value = (void *) ((char *) address + size);
  break_value = (last_bloc
                 ? (char *) last_bloc->data + last_bloc->size
                 : (char *) first_heap->bloc_start);
  if (size < 0)
    relinquish ();

  return address;
}


/* Allocate a relocatable bloc of storage of size SIZE.  A pointer to
   the data is returned in *PTR.  PTR is thus the address of some variable
   which will use the data area.

   The allocation of 0 bytes is valid.
   In case r_alloc_freeze_level is set, a best fit of unused blocs could be
   done before allocating a new area.  Not yet done.

   If we can't allocate the necessary memory, set *PTR to zero, and
   return zero.  */

void *
r_alloc (void **ptr, size_t size)
{
  bloc_ptr new_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  new_bloc = get_bloc (MEM_ROUNDUP (size));
  if (new_bloc)
    {
      new_bloc->variable = ptr;
      *ptr = new_bloc->data;
    }
  else
    *ptr = 0;

  return *ptr;
}

/* Free a bloc of relocatable storage whose data is pointed to by PTR.
   Store 0 in *PTR to show there's no bloc allocated.  */

void
r_alloc_free (void **ptr)
{
  bloc_ptr dead_bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  dead_bloc = find_bloc (ptr);
  if (dead_bloc == NIL_BLOC)
    emacs_abort (); /* Double free? PTR not originally used to allocate?  */

  free_bloc (dead_bloc);
  *ptr = 0;

  refill_memory_reserve ();
}

/* Given a pointer at address PTR to relocatable data, resize it to SIZE.
   Do this by shifting all blocs above this one up in memory, unless
   SIZE is less than or equal to the current bloc size, in which case
   do nothing.

   In case r_alloc_freeze_level is set, a new bloc is allocated, and the
   memory copied to it.  Not very efficient.  We could traverse the
   bloc_list for a best fit of free blocs first.

   Change *PTR to reflect the new bloc, and return this value.

   If more memory cannot be allocated, then leave *PTR unchanged, and
   return zero.  */

void *
r_re_alloc (void **ptr, size_t size)
{
  bloc_ptr bloc;

  if (! r_alloc_initialized)
    r_alloc_init ();

  if (!*ptr)
    return r_alloc (ptr, size);
  if (!size)
    {
      r_alloc_free (ptr);
      return r_alloc (ptr, 0);
    }

  bloc = find_bloc (ptr);
  if (bloc == NIL_BLOC)
    emacs_abort (); /* Already freed? PTR not originally used to allocate?  */

  if (size < bloc->size)
    {
      /* Wouldn't it be useful to actually resize the bloc here?  */
      /* I think so too, but not if it's too expensive...  */
      if ((bloc->size - MEM_ROUNDUP (size) >= page_size)
          && r_alloc_freeze_level == 0)
        {
          resize_bloc (bloc, MEM_ROUNDUP (size));
          /* Never mind if this fails, just do nothing...  */
          /* It *should* be infallible!  */
        }
    }
  else if (size > bloc->size)
    {
      if (r_alloc_freeze_level)
        {
          bloc_ptr new_bloc;
          new_bloc = get_bloc (MEM_ROUNDUP (size));
          if (new_bloc)
            {
              new_bloc->variable = ptr;
              *ptr = new_bloc->data;
              bloc->variable = NULL;
            }
          else
            return NULL;
        }
      else
        {
          if (! resize_bloc (bloc, MEM_ROUNDUP (size)))
            return NULL;
        }
    }
  return *ptr;
}
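
/* A usage sketch of the interface above (illustrative only, not part
   of the original file):

     void *buf;

     if (r_alloc (&buf, 8192))            -- buf -> 8192 relocatable bytes
       {
         ... use buf, re-reading it after any allocation,
             since the data may have been relocated ...
         if (! r_re_alloc (&buf, 16384))  -- grow; buf is updated
           ... handle the failure; buf is unchanged ...
         r_alloc_free (&buf);             -- free the bloc; buf becomes 0
       }

   The variable passed as PTR must stay alive for as long as the bloc
   does, because relocation writes the new address back through it.  */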


#ifdef DOUG_LEA_MALLOC

/* Reinitialize the morecore hook variables after restarting a dumped
   Emacs.  This is needed when using Doug Lea's malloc from GNU libc.  */
void
r_alloc_reinit (void)
{
  /* Only do this if the hook has been reset, so that we don't get an
     infinite loop, in case Emacs was linked statically.  */
  if (__morecore != r_alloc_sbrk)
    {
      real_morecore = __morecore;
      __morecore = r_alloc_sbrk;
    }
}

#endif /* DOUG_LEA_MALLOC */

#ifdef DEBUG

#include <assert.h>

void
r_alloc_check (void)
{
  int found = 0;
  heap_ptr h, ph = 0;
  bloc_ptr b, pb = 0;

  if (!r_alloc_initialized)
    return;

  assert (first_heap);
  assert (last_heap->end <= (void *) sbrk (0));
  assert ((void *) first_heap < first_heap->start);
  assert (first_heap->start <= virtual_break_value);
  assert (virtual_break_value <= first_heap->end);

  for (h = first_heap; h; h = h->next)
    {
      assert (h->prev == ph);
      assert ((void *) PAGE_ROUNDUP (h->end) == h->end);
#if 0 /* ??? The code in ralloc.c does not really try to ensure
         the heap start has any sort of alignment.
         Perhaps it should.  */
      assert ((void *) MEM_ROUNDUP (h->start) == h->start);
#endif
      assert ((void *) MEM_ROUNDUP (h->bloc_start) == h->bloc_start);
      assert (h->start <= h->bloc_start && h->bloc_start <= h->end);

      if (ph)
        {
          assert (ph->end < h->start);
          assert (h->start <= (void *) h && (void *) (h + 1) <= h->bloc_start);
        }

      if (h->bloc_start <= break_value && break_value <= h->end)
        found = 1;

      ph = h;
    }

  assert (found);
  assert (last_heap == ph);

  for (b = first_bloc; b; b = b->next)
    {
      assert (b->prev == pb);
      assert ((void *) MEM_ROUNDUP (b->data) == b->data);
      assert ((size_t) MEM_ROUNDUP (b->size) == b->size);

      ph = 0;
      for (h = first_heap; h; h = h->next)
        {
          if (h->bloc_start <= b->data && b->data + b->size <= h->end)
            break;
          ph = h;
        }

      assert (h);

      if (pb && pb->data + pb->size != b->data)
        {
          assert (ph && b->data == h->bloc_start);
          while (ph)
            {
              if (ph->bloc_start <= pb->data
                  && pb->data + pb->size <= ph->end)
                {
                  assert (pb->data + pb->size + b->size > ph->end);
                  break;
                }
              else
                {
                  assert (ph->bloc_start + b->size > ph->end);
                }
              ph = ph->prev;
            }
        }
      pb = b;
    }

  assert (last_bloc == pb);

  if (last_bloc)
    assert (last_bloc->data + last_bloc->size == break_value);
  else
    assert (first_heap->bloc_start == break_value);
}

#endif /* DEBUG */

/* Update the internal record of which variable points to some data to NEW.
   Used by buffer-swap-text in Emacs to restore consistency after it
   swaps the buffer text between two buffer objects.  The OLD pointer
   is checked to ensure that memory corruption does not occur due to
   misuse.  */
void
r_alloc_reset_variable (void **old, void **new)
{
  bloc_ptr bloc = first_bloc;

  /* Find the bloc that corresponds to the data pointed to by *NEW.
     find_bloc cannot be used, as it has internal consistency checks
     which fail when the variable needs resetting.  */
  while (bloc != NIL_BLOC)
    {
      if (bloc->data == *new)
        break;

      bloc = bloc->next;
    }

  if (bloc == NIL_BLOC || bloc->variable != old)
    emacs_abort (); /* Already freed? OLD not originally used to allocate?  */

  /* Update variable to point to the new location.  */
  bloc->variable = new;
}

void
r_alloc_inhibit_buffer_relocation (int inhibit)
{
  if (use_relocatable_buffers > 1)
    use_relocatable_buffers = 1;
  if (inhibit)
    use_relocatable_buffers--;
  else if (use_relocatable_buffers < 1)
    use_relocatable_buffers++;
}
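
/* For example (an illustrative sketch): code that must hand a pointer
   into relocatable data to something that cannot tolerate relocation
   can bracket that region with

     r_alloc_inhibit_buffer_relocation (1);   -- relocation off
     ... use the raw pointer ...
     r_alloc_inhibit_buffer_relocation (0);   -- relocation back on

   The calls nest, counting use_relocatable_buffers down and back up.  */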


/***********************************************************************
                            Initialization
 ***********************************************************************/

/* Initialize various things for memory allocation.  */

static void
r_alloc_init (void)
{
  if (r_alloc_initialized)
    return;
  r_alloc_initialized = 1;

  page_size = PAGE;
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  real_morecore = __morecore;
  __morecore = r_alloc_sbrk;

  first_heap = last_heap = &heap_base;
  first_heap->next = first_heap->prev = NIL_HEAP;
  first_heap->start = first_heap->bloc_start
    = virtual_break_value = break_value = real_morecore (0);
  if (break_value == NULL)
    emacs_abort ();

  extra_bytes = PAGE_ROUNDUP (50000);
#endif

#ifdef DOUG_LEA_MALLOC
  block_input ();
  mallopt (M_TOP_PAD, 64 * 4096);
  unblock_input ();
#else
#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  /* Give GNU malloc's morecore some hysteresis so that we move all
     the relocatable blocks much less often.  The number used to be
     64, but alloc.c would override that with 32 in code that was
     removed when SYNC_INPUT became the only input handling mode.
     That code was conditioned on !DOUG_LEA_MALLOC, so the call to
     mallopt above is left unchanged.  (Actually, I think there's no
     system nowadays that uses DOUG_LEA_MALLOC and also uses
     REL_ALLOC.)  */
  __malloc_extra_blocks = 32;
#endif
#endif

#if !defined SYSTEM_MALLOC && !defined HYBRID_MALLOC
  first_heap->end = (void *) PAGE_ROUNDUP (first_heap->start);

  /* The extra call to real_morecore guarantees that the end of the
     address space is a multiple of page_size, even if page_size is
     not really the page size of the system running the binary in
     which page_size is stored.  This allows a binary to be built on a
     system with one page size and run on a system with a smaller page
     size.  */
  real_morecore ((char *) first_heap->end - (char *) first_heap->start);

  /* Clear the rest of the last page; this memory is in our address space
     even though it is after the sbrk value.  */
  /* Doubly true, with the additional call that explicitly adds the
     rest of that page to the address space.  */
  memset (first_heap->start, 0,
          (char *) first_heap->end - (char *) first_heap->start);
  virtual_break_value = break_value = first_heap->bloc_start = first_heap->end;
#endif

  use_relocatable_buffers = 1;
}
