root/src/profiler.c


DEFINITIONS

This source file includes the following definitions.
  1. saturated_add
  2. make_log
  3. approximate_median
  4. evict_lower_half
  5. record_backtrace
  6. handle_profiler_signal
  7. deliver_profiler_signal
  8. setup_cpu_timer
  9. DEFUN (profiler-cpu-start)
  10. DEFUN (profiler-cpu-stop)
  11. DEFUN (profiler-cpu-running-p)
  12. DEFUN (profiler-cpu-log)
  13. DEFUN (profiler-memory-start)
  14. DEFUN (profiler-memory-stop)
  15. DEFUN (profiler-memory-running-p)
  16. DEFUN (profiler-memory-log)
  17. malloc_probe
  18. DEFUN (function-equal)
  19. cmpfn_profiler
  20. hashfn_profiler
  21. syms_of_profiler
  22. syms_of_profiler_for_pdumper
/* Profiler implementation.

Copyright (C) 2012-2023 Free Software Foundation, Inc.

This file is part of GNU Emacs.

GNU Emacs is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or (at
your option) any later version.

GNU Emacs is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */

#include <config.h>
#include "lisp.h"
#include "syssignal.h"
#include "systime.h"
#include "pdumper.h"

/* Return A + B, but return the maximum fixnum if the result would overflow.
   Assume A and B are nonnegative and in fixnum range.  */

static EMACS_INT
saturated_add (EMACS_INT a, EMACS_INT b)
{
  return min (a + b, MOST_POSITIVE_FIXNUM);
}
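
/* Illustrative note: A and B are both in fixnum range, which is
   narrower than the full range of EMACS_INT (fixnums give up a few
   tag bits), so A + B cannot overflow EMACS_INT itself and a simple
   clamp suffices, e.g.
     saturated_add (MOST_POSITIVE_FIXNUM, 1) == MOST_POSITIVE_FIXNUM.  */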

/* Logs.  */

typedef struct Lisp_Hash_Table log_t;

static Lisp_Object cmpfn_profiler (Lisp_Object, Lisp_Object,
                                   struct Lisp_Hash_Table *);
static Lisp_Object hashfn_profiler (Lisp_Object, struct Lisp_Hash_Table *);

static const struct hash_table_test hashtest_profiler =
  {
   LISPSYM_INITIALLY (Qprofiler_backtrace_equal),
   LISPSYM_INITIALLY (Qnil) /* user_hash_function */,
   LISPSYM_INITIALLY (Qnil) /* user_cmp_function */,
   cmpfn_profiler,
   hashfn_profiler,
  };

static Lisp_Object
make_log (void)
{
  /* We use a standard Elisp hash-table object, but we use it in
     a special way.  This is OK as long as the object is not exposed
     to Elisp, i.e. until it is returned by *-profiler-log, after which
     it can't be used any more.  */
  EMACS_INT heap_size
    = clip_to_bounds (0, profiler_log_size, MOST_POSITIVE_FIXNUM);
  ptrdiff_t max_stack_depth
    = clip_to_bounds (0, profiler_max_stack_depth, PTRDIFF_MAX);
  Lisp_Object log = make_hash_table (hashtest_profiler, heap_size,
                                     DEFAULT_REHASH_SIZE,
                                     DEFAULT_REHASH_THRESHOLD,
                                     Qnil, false);
  struct Lisp_Hash_Table *h = XHASH_TABLE (log);

  /* What is special about our hash-tables is that the values are pre-filled
     with the vectors we'll use as keys.  */
  ptrdiff_t i = ASIZE (h->key_and_value) >> 1;
  while (i > 0)
    set_hash_value_slot (h, --i, make_nil_vector (max_stack_depth));
  return log;
}
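
/* So immediately after make_log, every value slot holds a fresh
   vector of profiler-max-stack-depth nils.  record_backtrace below
   borrows the vector at next_free as scratch space, fills it via
   get_backtrace, and, if the backtrace turns out to be new, installs
   that same vector as its own key -- all without allocating while in
   a signal handler.  */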

/* Evict the least used half of the hash_table.

   When the table is full, we have to evict someone.
   The easiest and most efficient approach is to evict the value
   we're about to add (i.e. once the table is full, stop sampling).

   We could also pick the element with the lowest count and evict it,
   but finding it is O(N) and for that amount of work we get very
   little in return: for the next sample, this latest sample will have
   count==1 and will hence be a prime candidate for eviction :-(

   So instead, we take O(N) time to eliminate more or less half of the
   entries (the half with the lowest counts).  So we get an amortized
   cost of O(1) and we get O(N) time for a new entry to grow larger
   than the other least counts before a new round of eviction.  */
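
/* Concretely, with the default profiler-log-size of 10000 a pass
   evicts roughly half of the table (about 5000 entries), so the O(N)
   sweep runs at most once per ~5000 insertions -- the amortized O(1)
   cost per new entry described above.  */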

static EMACS_INT approximate_median (log_t *log,
                                     ptrdiff_t start, ptrdiff_t size)
{
  eassert (size > 0);
  if (size < 2)
    return XFIXNUM (HASH_VALUE (log, start));
  if (size < 3)
    /* Not an actual median, but better for our application than
       choosing either of the two numbers.  */
    return ((XFIXNUM (HASH_VALUE (log, start))
             + XFIXNUM (HASH_VALUE (log, start + 1)))
            / 2);
  else
    {
      ptrdiff_t newsize = size / 3;
      ptrdiff_t start2 = start + newsize;
      EMACS_INT i1 = approximate_median (log, start, newsize);
      EMACS_INT i2 = approximate_median (log, start2, newsize);
      EMACS_INT i3 = approximate_median (log, start2 + newsize,
                                         size - 2 * newsize);
      return (i1 < i2
              ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
              : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
    }
}
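
/* Worked example of the ternary recursion above: given the nine
   counts {5, 1, 4, 2, 8, 7, 3, 6, 9} in consecutive slots, the three
   thirds yield approximate medians 4, 7 and 6, whose median is 6
   (the exact median is 5).  The result is only approximate, but it
   is computed in O(N) time with no allocation, which is all that
   evict_lower_half needs.  */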

static void evict_lower_half (log_t *log)
{
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);

  for (ptrdiff_t i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XFIXNUM (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        eassert (BASE_EQ (Qunbound, HASH_KEY (log, i)));
        eassert (log->next_free == i);

        eassert (VECTORP (key));
        for (ptrdiff_t j = 0; j < ASIZE (key); j++)
          ASET (key, j, Qnil);

        set_hash_value_slot (log, i, key);
      }
}
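
/* Note how each Fremhash above leaves the slot's key Qunbound and
   puts the slot back on the free list; re-installing the nil-filled
   key vector as the slot's value then restores the invariant set up
   by make_log, so record_backtrace can keep running without
   allocating.  */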

/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: interrupt counts for CPU, and the allocation
   size for memory.  */

static void
record_backtrace (log_t *log, EMACS_INT count)
{
  if (log->next_free < 0)
    /* FIXME: transfer the evicted counts to a special entry rather
       than dropping them on the floor.  */
    evict_lower_half (log);
  ptrdiff_t index = log->next_free;

  /* Get a "working memory" vector.  */
  Lisp_Object backtrace = HASH_VALUE (log, index);
  eassert (BASE_EQ (Qunbound, HASH_KEY (log, index)));
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    Lisp_Object hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        EMACS_INT old_val = XFIXNUM (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_fixnum (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is -1.  */
        eassert (0 <= log->next_free);
        ptrdiff_t j = hash_put (log, backtrace, make_fixnum (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in maybe_quit, such that maybe_quit can then
             call Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag checked in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
           Of course, using Elisp is generally preferable, but it may
           take longer until we get a chance to run the Elisp code, so
           there's more risk that the table will get full before we
           get there.  */
      }
  }
}

/* Sampling profiler.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_ITIMERSPEC
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler.  */
static enum profiler_cpu_running
  { NOT_RUNNING,
#ifdef HAVE_ITIMERSPEC
    TIMER_SETTIME_RUNNING,
#endif
    SETITIMER_RUNNING
  }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static Lisp_Object cpu_log;

/* Separate counter for the time spent in the GC.  */
static EMACS_INT cpu_gc_count;

/* The current sampling interval in nanoseconds.  */
static EMACS_INT current_sampling_interval;

/* Signal handler for sampling profiler.  */

static void
handle_profiler_signal (int signal)
{
  if (EQ (backtrace_top_function (), QAutomatic_GC))
    /* Special case the time-count inside GC because the hash-table
       code is not prepared to be used while the GC is running.
       More specifically it uses ASIZE at many places where it does
       not expect the ARRAY_MARK_FLAG to be set.  We could try and
       harden the hash-table code, but it doesn't seem worth the
       effort.  */
    cpu_gc_count = saturated_add (cpu_gc_count, 1);
  else
    {
      EMACS_INT count = 1;
#if defined HAVE_ITIMERSPEC && defined HAVE_TIMER_GETOVERRUN
      if (profiler_timer_ok)
        {
          int overruns = timer_getoverrun (profiler_timer);
          eassert (overruns >= 0);
          count += overruns;
        }
#endif
      eassert (HASH_TABLE_P (cpu_log));
      record_backtrace (XHASH_TABLE (cpu_log), count);
    }
}
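
/* The overrun count keeps the totals honest under load: if the timer
   expired three times before the signal could be delivered,
   timer_getoverrun returns 2, so this one invocation records a
   weight of 3 rather than silently dropping two ticks.  */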

static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}

static int
setup_cpu_timer (Lisp_Object sampling_interval)
{
  int billion = 1000000000;

  if (! RANGED_FIXNUMP (1, sampling_interval,
                         (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / billion
                          ? ((EMACS_INT) TYPE_MAXIMUM (time_t) * billion
                             + (billion - 1))
                          : EMACS_INT_MAX)))
    return -1;

  current_sampling_interval = XFIXNUM (sampling_interval);
  struct timespec interval
    = make_timespec (current_sampling_interval / billion,
                     current_sampling_interval % billion);
  struct sigaction action;
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_ITIMERSPEC
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
        CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
        CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
        CLOCK_MONOTONIC,
#endif
        CLOCK_REALTIME
      };
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (int i = 0; i < ARRAYELTS (system_clock); i++)
        if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
          {
            profiler_timer_ok = true;
            break;
          }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      if (timer_settime (profiler_timer, 0, &ispec, 0) == 0)
        return TIMER_SETTIME_RUNNING;
    }
#endif

#ifdef HAVE_SETITIMER
  struct itimerval timer;
  timer.it_value = timer.it_interval = make_timeval (interval);
  if (setitimer (ITIMER_PROF, &timer, 0) == 0)
    return SETITIMER_RUNNING;
#endif

  return NOT_RUNNING;
}
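
/* Example of the interval split above: a SAMPLING-INTERVAL of
   1500000000 becomes make_timespec (1, 500000000), i.e. 1.5 s.  The
   RANGED_FIXNUMP check guarantees beforehand that the seconds part
   fits in time_t.  */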

DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples every SAMPLING-INTERVAL nanoseconds, approximately.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sampling_interval)
{
  if (profiler_cpu_running)
    error ("CPU profiler is already running");

  if (NILP (cpu_log))
    {
      cpu_gc_count = 0;
      cpu_log = make_log ();
    }

  int status = setup_cpu_timer (sampling_interval);
  if (status < 0)
    {
      profiler_cpu_running = NOT_RUNNING;
      error ("Invalid sampling interval");
    }
  else
    {
      profiler_cpu_running = status;
      if (! profiler_cpu_running)
        error ("Unable to start profiler timer");
    }

  return Qt;
}

DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_ITIMERSPEC
    case TIMER_SETTIME_RUNNING:
      {
        struct itimerspec disable = { 0, };
        timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

#ifdef HAVE_SETITIMER
    case SETITIMER_RUNNING:
      {
        struct itimerval disable = { 0, };
        setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
#endif
    }

  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}

DEFUN ("profiler-cpu-running-p",
       Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
       0, 0, 0,
       doc: /* Return non-nil if cpu profiler is running.  */)
  (void)
{
  return profiler_cpu_running ? Qt : Qnil;
}

DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
       0, 0, 0,
       doc: /* Return the current cpu profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of time spent at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = cpu_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  cpu_log = profiler_cpu_running ? make_log () : Qnil;
  Fputhash (make_vector (1, QAutomatic_GC),
            make_fixnum (cpu_gc_count),
            result);
  cpu_gc_count = 0;
  return result;
}
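
/* Typical Elisp-level use of the four functions above (illustrative
   sketch only):

     (profiler-cpu-start 1000000)   ; sample about once per millisecond
     ;; ... code to be profiled ...
     (profiler-cpu-stop)
     (profiler-cpu-log)             ; hash-table of backtrace -> count

   The returned table also contains the GC samples, filed by the
   Fputhash call above under a one-element backtrace holding the
   `Automatic GC' symbol.  */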
#endif /* PROFILER_CPU_SUPPORT */

/* Memory profiler.  */

/* True if memory profiler is running.  */
bool profiler_memory_running;

static Lisp_Object memory_log;

DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  if (NILP (memory_log))
    memory_log = make_log ();

  profiler_memory_running = true;

  return Qt;
}

DEFUN ("profiler-memory-stop",
       Fprofiler_memory_stop, Sprofiler_memory_stop,
       0, 0, 0,
       doc: /* Stop the memory profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  if (!profiler_memory_running)
    return Qnil;
  profiler_memory_running = false;
  return Qt;
}

DEFUN ("profiler-memory-running-p",
       Fprofiler_memory_running_p, Sprofiler_memory_running_p,
       0, 0, 0,
       doc: /* Return non-nil if memory profiler is running.  */)
  (void)
{
  return profiler_memory_running ? Qt : Qnil;
}

DEFUN ("profiler-memory-log",
       Fprofiler_memory_log, Sprofiler_memory_log,
       0, 0, 0,
       doc: /* Return the current memory profiler log.
The log is a hash-table mapping backtraces to counters which represent
the amount of memory allocated at those points.  Every backtrace is a vector
of functions, where the last few elements may be nil.
Before returning, a new log is allocated for future samples.  */)
  (void)
{
  Lisp_Object result = memory_log;
  /* Here we're making the log visible to Elisp, so it's not safe any
     more for our use afterwards since we can't rely on its special
     pre-allocated keys anymore.  So we have to allocate a new one.  */
  memory_log = profiler_memory_running ? make_log () : Qnil;
  return result;
}


/* Signals and probes.  */

/* Record that the current backtrace allocated SIZE bytes.  */
void
malloc_probe (size_t size)
{
  if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
    /* FIXME: We should do something like what we did with `cpu_gc_count`.  */
    return;
  eassert (HASH_TABLE_P (memory_log));
  record_backtrace (XHASH_TABLE (memory_log), min (size, MOST_POSITIVE_FIXNUM));
}

DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
       doc: /* Return non-nil if F1 and F2 come from the same source.
Used to determine if different closures are just different instances of
the same lambda expression, or are really unrelated functions.  */)
     (Lisp_Object f1, Lisp_Object f2)
{
  bool res;
  if (EQ (f1, f2))
    res = true;
  else if (COMPILEDP (f1) && COMPILEDP (f2))
    res = EQ (AREF (f1, COMPILED_BYTECODE), AREF (f2, COMPILED_BYTECODE));
  else if (CONSP (f1) && CONSP (f2) && CONSP (XCDR (f1)) && CONSP (XCDR (f2))
           && EQ (Qclosure, XCAR (f1))
           && EQ (Qclosure, XCAR (f2)))
    res = EQ (XCDR (XCDR (f1)), XCDR (XCDR (f2)));
  else
    res = false;
  return res ? Qt : Qnil;
}
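
/* Illustrative consequence of the rules above: evaluating the same
   (lambda ...) form twice yields two distinct closures, but they
   share the tail of their `closure' list (and, once byte-compiled,
   their bytecode string), so function-equal returns t for them even
   though they may capture different environments.  Plain EQ would
   treat every instance as a separate function and fragment the
   profiler's counts.  */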

static Lisp_Object
cmpfn_profiler (Lisp_Object bt1, Lisp_Object bt2, struct Lisp_Hash_Table *h)
{
  if (EQ (bt1, bt2))
    return Qt;
  else if (VECTORP (bt1) && VECTORP (bt2))
    {
      ptrdiff_t l = ASIZE (bt1);
      if (l != ASIZE (bt2))
        return Qnil;
      for (ptrdiff_t i = 0; i < l; i++)
        if (NILP (Ffunction_equal (AREF (bt1, i), AREF (bt2, i))))
          return Qnil;
      return Qt;
    }
  else
    return Qnil;
}

static Lisp_Object
hashfn_profiler (Lisp_Object bt, struct Lisp_Hash_Table *h)
{
  EMACS_UINT hash;
  if (VECTORP (bt))
    {
      hash = 0;
      ptrdiff_t l = ASIZE (bt);
      for (ptrdiff_t i = 0; i < l; i++)
        {
          Lisp_Object f = AREF (bt, i);
          EMACS_UINT hash1
            = (COMPILEDP (f) ? XHASH (AREF (f, COMPILED_BYTECODE))
               : (CONSP (f) && CONSP (XCDR (f)) && EQ (Qclosure, XCAR (f)))
               ? XHASH (XCDR (XCDR (f))) : XHASH (f));
          hash = sxhash_combine (hash, hash1);
        }
    }
  else
    hash = XHASH (bt);
  return make_ufixnum (SXHASH_REDUCE (hash));
}
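
/* Note that this hash function mirrors cmpfn_profiler: each frame is
   reduced to the very object function-equal compares with EQ (the
   bytecode string of a compiled function, the args+body tail of an
   interpreted closure, the object itself otherwise).  Hence two
   backtraces that compare equal necessarily hash equal -- the
   invariant any hash-table test must uphold.  */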

static void syms_of_profiler_for_pdumper (void);

void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
              doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
              doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

  DEFSYM (Qprofiler_backtrace_equal, "profiler-backtrace-equal");

  defsubr (&Sfunction_equal);

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu_log = Qnil;
  staticpro (&cpu_log);
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory_log = Qnil;
  staticpro (&memory_log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);

  pdumper_do_now_and_after_load (syms_of_profiler_for_pdumper);
}

static void
syms_of_profiler_for_pdumper (void)
{
  if (dumped_with_pdumper_p ())
    {
#ifdef PROFILER_CPU_SUPPORT
      cpu_log = Qnil;
#endif
      memory_log = Qnil;
    }
  else
    {
#ifdef PROFILER_CPU_SUPPORT
      eassert (NILP (cpu_log));
#endif
      eassert (NILP (memory_log));
    }
}
