root/src/profiler.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. saturated_add
  2. make_log
  3. approximate_median
  4. evict_lower_half
  5. record_backtrace
  6. add_sample
  7. handle_profiler_signal
  8. deliver_profiler_signal
  9. setup_cpu_timer
  10. DEFUN
  11. DEFUN
  12. DEFUN
  13. export_log
  14. DEFUN
  15. DEFUN
  16. DEFUN
  17. DEFUN
  18. DEFUN
  19. malloc_probe
  20. cmpfn_profiler
  21. hashfn_profiler
  22. syms_of_profiler
  23. syms_of_profiler_for_pdumper

     1 /* Profiler implementation.
     2 
     3 Copyright (C) 2012-2023 Free Software Foundation, Inc.
     4 
     5 This file is part of GNU Emacs.
     6 
     7 GNU Emacs is free software: you can redistribute it and/or modify
     8 it under the terms of the GNU General Public License as published by
     9 the Free Software Foundation, either version 3 of the License, or (at
    10 your option) any later version.
    11 
    12 GNU Emacs is distributed in the hope that it will be useful,
    13 but WITHOUT ANY WARRANTY; without even the implied warranty of
    14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    15 GNU General Public License for more details.
    16 
    17 You should have received a copy of the GNU General Public License
    18 along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */
    19 
    20 #include <config.h>
    21 #include "lisp.h"
    22 #include "syssignal.h"
    23 #include "systime.h"
    24 #include "pdumper.h"
    25 
    26 /* Return A + B, but return the maximum fixnum if the result would overflow.
    27    Assume A and B are nonnegative and in fixnum range.  */
    28 
    29 static EMACS_INT
    30 saturated_add (EMACS_INT a, EMACS_INT b)
    31 {
    32   return min (a + b, MOST_POSITIVE_FIXNUM);
    33 }
    34 
/* Logs.  */

/* A profiler log is an ordinary Elisp hash table used in a special
   way (see make_log); log_t is a C-side shorthand for it.  */
typedef struct Lisp_Hash_Table log_t;

static Lisp_Object cmpfn_profiler (Lisp_Object, Lisp_Object,
                                   struct Lisp_Hash_Table *);
static Lisp_Object hashfn_profiler (Lisp_Object, struct Lisp_Hash_Table *);

/* Hash-table test treating backtrace keys as equal when their frames
   come from the same source (see cmpfn_profiler / hashfn_profiler).  */
static const struct hash_table_test hashtest_profiler =
  {
   LISPSYM_INITIALLY (Qprofiler_backtrace_equal),
   LISPSYM_INITIALLY (Qnil) /* user_hash_function */,
   LISPSYM_INITIALLY (Qnil) /* user_cmp_function */,
   cmpfn_profiler,
   hashfn_profiler,
  };

struct profiler_log {
  Lisp_Object log;     /* Hash table mapping backtrace vectors to counts.  */
  EMACS_INT gc_count;  /* Samples taken during GC.  */
  EMACS_INT discarded; /* Samples evicted during table overflow.  */
};
    57 
    58 static struct profiler_log
    59 make_log (void)
    60 {
    61   /* We use a standard Elisp hash-table object, but we use it in
    62      a special way.  This is OK as long as the object is not exposed
    63      to Elisp, i.e. until it is returned by *-profiler-log, after which
    64      it can't be used any more.  */
    65   EMACS_INT heap_size
    66     = clip_to_bounds (0, profiler_log_size, MOST_POSITIVE_FIXNUM);
    67   ptrdiff_t max_stack_depth
    68     = clip_to_bounds (0, profiler_max_stack_depth, PTRDIFF_MAX);;
    69   struct profiler_log log
    70     = { make_hash_table (hashtest_profiler, heap_size,
    71                          DEFAULT_REHASH_SIZE,
    72                          DEFAULT_REHASH_THRESHOLD,
    73                          Qnil, false),
    74         0, 0 };
    75   struct Lisp_Hash_Table *h = XHASH_TABLE (log.log);
    76 
    77   /* What is special about our hash-tables is that the values are pre-filled
    78      with the vectors we'll use as keys.  */
    79   ptrdiff_t i = ASIZE (h->key_and_value) >> 1;
    80   while (i > 0)
    81     set_hash_value_slot (h, --i, make_nil_vector (max_stack_depth));
    82   return log;
    83 }
    84 
    85 /* Evict the least used half of the hash_table.
    86 
    87    When the table is full, we have to evict someone.
    88    The easiest and most efficient is to evict the value we're about to add
    89    (i.e. once the table is full, stop sampling).
    90 
    91    We could also pick the element with the lowest count and evict it,
    92    but finding it is O(N) and for that amount of work we get very
    93    little in return: for the next sample, this latest sample will have
    94    count==1 and will hence be a prime candidate for eviction :-(
    95 
    96    So instead, we take O(N) time to eliminate more or less half of the
    97    entries (the half with the lowest counts).  So we get an amortized
    98    cost of O(1) and we get O(N) time for a new entry to grow larger
    99    than the other least counts before a new round of eviction.  */
   100 
   101 static EMACS_INT approximate_median (log_t *log,
   102                                      ptrdiff_t start, ptrdiff_t size)
   103 {
   104   eassert (size > 0);
   105   if (size < 2)
   106     return XFIXNUM (HASH_VALUE (log, start));
   107   if (size < 3)
   108     /* Not an actual median, but better for our application than
   109        choosing either of the two numbers.  */
   110     return ((XFIXNUM (HASH_VALUE (log, start))
   111              + XFIXNUM (HASH_VALUE (log, start + 1)))
   112             / 2);
   113   else
   114     {
   115       ptrdiff_t newsize = size / 3;
   116       ptrdiff_t start2 = start + newsize;
   117       EMACS_INT i1 = approximate_median (log, start, newsize);
   118       EMACS_INT i2 = approximate_median (log, start2, newsize);
   119       EMACS_INT i3 = approximate_median (log, start2 + newsize,
   120                                          size - 2 * newsize);
   121       return (i1 < i2
   122               ? (i2 < i3 ? i2 : (i1 < i3 ? i3 : i1))
   123               : (i1 < i3 ? i1 : (i2 < i3 ? i3 : i2)));
   124     }
   125 }
   126 
/* Evict from PLOG's table every entry whose count is at or below the
   approximate median, freeing roughly half of the slots.  Evicted
   counts are folded into PLOG->discarded so totals are not lost.  */
static void evict_lower_half (struct profiler_log *plog)
{
  log_t *log = XHASH_TABLE (plog->log);
  ptrdiff_t size = ASIZE (log->key_and_value) / 2;
  EMACS_INT median = approximate_median (log, 0, size);

  for (ptrdiff_t i = 0; i < size; i++)
    /* Evict not only values smaller but also values equal to the median,
       so as to make sure we evict something no matter what.  */
    if (XFIXNUM (HASH_VALUE (log, i)) <= median)
      {
        Lisp_Object key = HASH_KEY (log, i);
        EMACS_INT count = XFIXNUM (HASH_VALUE (log, i));
        /* Record how many samples we throw away here.  */
        plog->discarded = saturated_add (plog->discarded, count);
        { /* FIXME: we could make this more efficient.  */
          Lisp_Object tmp;
          XSET_HASH_TABLE (tmp, log); /* FIXME: Use make_lisp_ptr.  */
          Fremhash (key, tmp);
        }
        /* Fremhash put slot I at the head of the free list.  */
        eassert (BASE_EQ (Qunbound, HASH_KEY (log, i)));
        eassert (log->next_free == i);

        /* Wipe the old key vector and recycle it as the pre-allocated
           value of the now-free slot (the invariant set up by make_log).  */
        eassert (VECTORP (key));
        for (ptrdiff_t j = 0; j < ASIZE (key); j++)
          ASET (key, j, Qnil);

        set_hash_value_slot (log, i, key);
      }
}
   156 
/* Record the current backtrace in LOG.  COUNT is the weight of this
   current backtrace: interrupt counts for CPU, and the allocation
   size for memory.  May run from a signal handler, so it must not
   allocate memory (make_log pre-allocated everything we need).  */

static void
record_backtrace (struct profiler_log *plog, EMACS_INT count)
{
  eassert (HASH_TABLE_P (plog->log));
  log_t *log = XHASH_TABLE (plog->log);
  /* next_free < 0 means the table is full: make room first so a
     pre-allocated slot is always available below.  */
  if (log->next_free < 0)
    evict_lower_half (plog);
  ptrdiff_t index = log->next_free;

  /* Get a "working memory" vector.  */
  Lisp_Object backtrace = HASH_VALUE (log, index);
  eassert (BASE_EQ (Qunbound, HASH_KEY (log, index)));
  get_backtrace (backtrace);

  { /* We basically do a `gethash+puthash' here, except that we have to be
       careful to avoid memory allocation since we're in a signal
       handler, and we optimize the code to try and avoid computing the
       hash+lookup twice.  See fns.c:Fputhash for reference.  */
    Lisp_Object hash;
    ptrdiff_t j = hash_lookup (log, backtrace, &hash);
    if (j >= 0)
      {
        /* Known backtrace: just bump its counter (saturating).  */
        EMACS_INT old_val = XFIXNUM (HASH_VALUE (log, j));
        EMACS_INT new_val = saturated_add (old_val, count);
        set_hash_value_slot (log, j, make_fixnum (new_val));
      }
    else
      { /* BEWARE!  hash_put in general can allocate memory.
           But currently it only does that if log->next_free is -1.  */
        eassert (0 <= log->next_free);
        ptrdiff_t j = hash_put (log, backtrace, make_fixnum (count), hash);
        /* Let's make sure we've put `backtrace' right where it
           already was to start with.  */
        eassert (index == j);

        /* FIXME: If the hash-table is almost full, we should set
           some global flag so that some Elisp code can offload its
           data elsewhere, so as to avoid the eviction code.
           There are 2 ways to do that, AFAICT:
           - Set a flag checked in maybe_quit, such that maybe_quit can then
             call Fprofiler_cpu_log and stash the full log for later use.
           - Set a flag check in post-gc-hook, so that Elisp code can call
             profiler-cpu-log.  That gives us more flexibility since that
             Elisp code can then do all kinds of fun stuff like write
             the log to disk.  Or turn it right away into a call tree.
           Of course, using Elisp is generally preferable, but it may
           take longer until we get a chance to run the Elisp code, so
           there's more risk that the table will get full before we
           get there.  */
      }
  }
}
   213 
/* Sampling profiler.  */

#ifdef PROFILER_CPU_SUPPORT

/* The profiler timer and whether it was properly initialized, if
   POSIX timers are available.  */
#ifdef HAVE_ITIMERSPEC
static timer_t profiler_timer;
static bool profiler_timer_ok;
#endif

/* Status of sampling profiler.  The nonzero values double as the
   return value of setup_cpu_timer, identifying which timer API is
   armed.  */
static enum profiler_cpu_running
  { NOT_RUNNING,
#ifdef HAVE_ITIMERSPEC
    TIMER_SETTIME_RUNNING,
#endif
    SETITIMER_RUNNING
  }
  profiler_cpu_running;

/* Hash-table log of CPU profiler.  */
static struct profiler_log cpu;

/* Hash-table log of Memory profiler.
   NOTE(review): declared inside #ifdef PROFILER_CPU_SUPPORT but used
   by the memory-profiler code past the matching #endif (e.g.
   malloc_probe) — verify this compiles with PROFILER_CPU_SUPPORT
   undefined; it may belong outside the conditional.  */
static struct profiler_log memory;

/* The current sampling interval in nanoseconds.  */
static EMACS_INT current_sampling_interval;
   243 
   244 /* Signal handler for sampling profiler.  */
   245 
   246 static void
   247 add_sample (struct profiler_log *plog, EMACS_INT count)
   248 {
   249   if (EQ (backtrace_top_function (), QAutomatic_GC)) /* bug#60237 */
   250     /* Special case the time-count inside GC because the hash-table
   251        code is not prepared to be used while the GC is running.
   252        More specifically it uses ASIZE at many places where it does
   253        not expect the ARRAY_MARK_FLAG to be set.  We could try and
   254        harden the hash-table code, but it doesn't seem worth the
   255        effort.  */
   256     plog->gc_count = saturated_add (plog->gc_count, count);
   257   else
   258     record_backtrace (plog, count);
   259 }
   260 
   261 
/* SIGPROF handler body: record one sample, plus one extra sample for
   each timer overrun the kernel reports (expirations that did not get
   their own signal delivery).  */
static void
handle_profiler_signal (int signal)
{
  EMACS_INT count = 1;
#if defined HAVE_ITIMERSPEC && defined HAVE_TIMER_GETOVERRUN
  if (profiler_timer_ok)
    {
      /* Each overrun is a missed expiration of the POSIX timer.  */
      int overruns = timer_getoverrun (profiler_timer);
      eassert (overruns >= 0);
      count += overruns;
    }
#endif
  add_sample (&cpu, count);
}
   276 
/* Installed SIGPROF handler: forward the signal to
   handle_profiler_signal via deliver_process_signal.  */
static void
deliver_profiler_signal (int signal)
{
  deliver_process_signal (signal, handle_profiler_signal);
}
   282 
/* Arm a timer that delivers SIGPROF every SAMPLING_INTERVAL
   nanoseconds.  Return -1 if SAMPLING_INTERVAL is invalid; otherwise
   return an enum profiler_cpu_running value saying which timer API
   was armed (NOT_RUNNING if none could be).  */
static int
setup_cpu_timer (Lisp_Object sampling_interval)
{
  int billion = 1000000000;

  /* Reject non-positive intervals and those whose nanosecond count
     would not fit in a time_t-based timespec.  */
  if (! RANGED_FIXNUMP (1, sampling_interval,
                         (TYPE_MAXIMUM (time_t) < EMACS_INT_MAX / billion
                          ? ((EMACS_INT) TYPE_MAXIMUM (time_t) * billion
                             + (billion - 1))
                          : EMACS_INT_MAX)))
    return -1;

  current_sampling_interval = XFIXNUM (sampling_interval);
  struct timespec interval
    = make_timespec (current_sampling_interval / billion,
                     current_sampling_interval % billion);
  struct sigaction action;
  emacs_sigaction_init (&action, deliver_profiler_signal);
  sigaction (SIGPROF, &action, 0);

#ifdef HAVE_ITIMERSPEC
  /* Prefer a POSIX per-thread/per-process CPU-time timer when one can
     be created; fall back through less desirable clocks.  */
  if (! profiler_timer_ok)
    {
      /* System clocks to try, in decreasing order of desirability.  */
      static clockid_t const system_clock[] = {
#ifdef CLOCK_THREAD_CPUTIME_ID
        CLOCK_THREAD_CPUTIME_ID,
#endif
#ifdef CLOCK_PROCESS_CPUTIME_ID
        CLOCK_PROCESS_CPUTIME_ID,
#endif
#ifdef CLOCK_MONOTONIC
        CLOCK_MONOTONIC,
#endif
        CLOCK_REALTIME
      };
      struct sigevent sigev;
      sigev.sigev_value.sival_ptr = &profiler_timer;
      sigev.sigev_signo = SIGPROF;
      sigev.sigev_notify = SIGEV_SIGNAL;

      for (int i = 0; i < ARRAYELTS (system_clock); i++)
        if (timer_create (system_clock[i], &sigev, &profiler_timer) == 0)
          {
            profiler_timer_ok = true;
            break;
          }
    }

  if (profiler_timer_ok)
    {
      struct itimerspec ispec;
      ispec.it_value = ispec.it_interval = interval;
      if (timer_settime (profiler_timer, 0, &ispec, 0) == 0)
        return TIMER_SETTIME_RUNNING;
    }
#endif

#ifdef HAVE_SETITIMER
  /* Last resort: the BSD-style profiling interval timer.  */
  struct itimerval timer;
  timer.it_value = timer.it_interval = make_timeval (interval);
  if (setitimer (ITIMER_PROF, &timer, 0) == 0)
    return SETITIMER_RUNNING;
#endif

  return NOT_RUNNING;
}
   350 
DEFUN ("profiler-cpu-start", Fprofiler_cpu_start, Sprofiler_cpu_start,
       1, 1, 0,
       doc: /* Start or restart the cpu profiler.
It takes call-stack samples each SAMPLING-INTERVAL nanoseconds, approximately.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (Lisp_Object sampling_interval)
{
  if (profiler_cpu_running)
    error ("CPU profiler is already running");

  /* Keep any existing log so samples accumulate across stop/start
     cycles until the log is exported.  */
  if (NILP (cpu.log))
    cpu = make_log ();

  /* setup_cpu_timer returns -1 for an invalid interval, or an enum
     profiler_cpu_running value saying which timer API was armed.  */
  int status = setup_cpu_timer (sampling_interval);
  if (status < 0)
    {
      profiler_cpu_running = NOT_RUNNING;
      error ("Invalid sampling interval");
    }
  else
    {
      profiler_cpu_running = status;
      if (! profiler_cpu_running)
        error ("Unable to start profiler timer");
    }

  return Qt;
}
   379 
DEFUN ("profiler-cpu-stop", Fprofiler_cpu_stop, Sprofiler_cpu_stop,
       0, 0, 0,
       doc: /* Stop the cpu profiler.  The profiler log is not affected.
Return non-nil if the profiler was running.  */)
  (void)
{
  /* Disarm whichever timer API setup_cpu_timer armed.  */
  switch (profiler_cpu_running)
    {
    case NOT_RUNNING:
      return Qnil;

#ifdef HAVE_ITIMERSPEC
    case TIMER_SETTIME_RUNNING:
      {
        /* A zeroed itimerspec disarms the POSIX timer.  */
        struct itimerspec disable = { 0, };
        timer_settime (profiler_timer, 0, &disable, 0);
      }
      break;
#endif

#ifdef HAVE_SETITIMER
    case SETITIMER_RUNNING:
      {
        /* A zeroed itimerval disarms the BSD-style timer.  */
        struct itimerval disable = { 0, };
        setitimer (ITIMER_PROF, &disable, 0);
      }
      break;
#endif
    }

  /* Ignore any SIGPROF already in flight.  */
  signal (SIGPROF, SIG_IGN);
  profiler_cpu_running = NOT_RUNNING;
  return Qt;
}
   414 
   415 DEFUN ("profiler-cpu-running-p",
   416        Fprofiler_cpu_running_p, Sprofiler_cpu_running_p,
   417        0, 0, 0,
   418        doc: /* Return non-nil if cpu profiler is running.  */)
   419   (void)
   420 {
   421   return profiler_cpu_running ? Qt : Qnil;
   422 }
   423 
   424 static Lisp_Object
   425 export_log (struct profiler_log *log)
   426 {
   427   Lisp_Object result = log->log;
   428   if (log->gc_count)
   429     Fputhash (CALLN (Fvector, QAutomatic_GC, Qnil),
   430               make_fixnum (log->gc_count),
   431               result);
   432   if (log->discarded)
   433     Fputhash (CALLN (Fvector, QDiscarded_Samples, Qnil),
   434               make_fixnum (log->discarded),
   435               result);
   436   /* Here we're making the log visible to Elisp, so it's not safe any
   437      more for our use afterwards since we can't rely on its special
   438      pre-allocated keys anymore.  So we have to allocate a new one.  */
   439   if (profiler_cpu_running)
   440     *log = make_log ();
   441   return result;
   442 }
   443 
   444 DEFUN ("profiler-cpu-log", Fprofiler_cpu_log, Sprofiler_cpu_log,
   445        0, 0, 0,
   446        doc: /* Return the current cpu profiler log.
   447 The log is a hash-table mapping backtraces to counters which represent
   448 the amount of time spent at those points.  Every backtrace is a vector
   449 of functions, where the last few elements may be nil.
   450 Before returning, a new log is allocated for future samples.  */)
   451   (void)
   452 {
   453   return (export_log (&cpu));
   454 }
   455 #endif /* PROFILER_CPU_SUPPORT */
   456 
/* Memory profiler.  */

/* True if memory profiler is running.  Read by the allocator to
   decide whether to invoke malloc_probe.  NOTE(review): presumably
   checked at the allocation call sites — confirm in alloc.c.  */
bool profiler_memory_running;
   461 
DEFUN ("profiler-memory-start", Fprofiler_memory_start, Sprofiler_memory_start,
       0, 0, 0,
       doc: /* Start/restart the memory profiler.
The memory profiler will take samples of the call-stack whenever a new
allocation takes place.  Note that most small allocations only trigger
the profiler occasionally.
See also `profiler-log-size' and `profiler-max-stack-depth'.  */)
  (void)
{
  if (profiler_memory_running)
    error ("Memory profiler is already running");

  /* Keep any existing log so samples accumulate across stop/start
     cycles until the log is exported.  */
  if (NILP (memory.log))
    memory = make_log ();

  profiler_memory_running = true;

  return Qt;
}
   481 
   482 DEFUN ("profiler-memory-stop",
   483        Fprofiler_memory_stop, Sprofiler_memory_stop,
   484        0, 0, 0,
   485        doc: /* Stop the memory profiler.  The profiler log is not affected.
   486 Return non-nil if the profiler was running.  */)
   487   (void)
   488 {
   489   if (!profiler_memory_running)
   490     return Qnil;
   491   profiler_memory_running = false;
   492   return Qt;
   493 }
   494 
   495 DEFUN ("profiler-memory-running-p",
   496        Fprofiler_memory_running_p, Sprofiler_memory_running_p,
   497        0, 0, 0,
   498        doc: /* Return non-nil if memory profiler is running.  */)
   499   (void)
   500 {
   501   return profiler_memory_running ? Qt : Qnil;
   502 }
   503 
   504 DEFUN ("profiler-memory-log",
   505        Fprofiler_memory_log, Sprofiler_memory_log,
   506        0, 0, 0,
   507        doc: /* Return the current memory profiler log.
   508 The log is a hash-table mapping backtraces to counters which represent
   509 the amount of memory allocated at those points.  Every backtrace is a vector
   510 of functions, where the last few elements may be nil.
   511 Before returning, a new log is allocated for future samples.  */)
   512   (void)
   513 {
   514   return (export_log (&memory));
   515 }
   516 
   517 
/* Signals and probes.  */

/* Record that the current backtrace allocated SIZE bytes.
   SIZE is clamped to the fixnum range before being recorded.
   NOTE(review): presumably only invoked while profiler_memory_running
   is set — confirm at the allocator call sites.  Also note add_sample
   is defined under #ifdef PROFILER_CPU_SUPPORT above; confirm this
   still compiles when that macro is undefined.  */
void
malloc_probe (size_t size)
{
  add_sample (&memory, min (size, MOST_POSITIVE_FIXNUM));
}
   526 
   527 DEFUN ("function-equal", Ffunction_equal, Sfunction_equal, 2, 2, 0,
   528        doc: /* Return non-nil if F1 and F2 come from the same source.
   529 Used to determine if different closures are just different instances of
   530 the same lambda expression, or are really unrelated function.  */)
   531      (Lisp_Object f1, Lisp_Object f2)
   532 {
   533   bool res;
   534   if (EQ (f1, f2))
   535     res = true;
   536   else if (COMPILEDP (f1) && COMPILEDP (f2))
   537     res = EQ (AREF (f1, COMPILED_BYTECODE), AREF (f2, COMPILED_BYTECODE));
   538   else if (CONSP (f1) && CONSP (f2) && CONSP (XCDR (f1)) && CONSP (XCDR (f2))
   539            && EQ (Qclosure, XCAR (f1))
   540            && EQ (Qclosure, XCAR (f2)))
   541     res = EQ (XCDR (XCDR (f1)), XCDR (XCDR (f2)));
   542   else
   543     res = false;
   544   return res ? Qt : Qnil;
   545 }
   546 
   547 static Lisp_Object
   548 cmpfn_profiler (Lisp_Object bt1, Lisp_Object bt2, struct Lisp_Hash_Table *h)
   549 {
   550   if (EQ (bt1, bt2))
   551     return Qt;
   552   else if (VECTORP (bt1) && VECTORP (bt2))
   553     {
   554       ptrdiff_t l = ASIZE (bt1);
   555       if (l != ASIZE (bt2))
   556         return Qnil;
   557       for (ptrdiff_t i = 0; i < l; i++)
   558         if (NILP (Ffunction_equal (AREF (bt1, i), AREF (bt2, i))))
   559           return Qnil;
   560       return Qt;
   561     }
   562   else
   563     return Qnil;
   564 }
   565 
   566 static Lisp_Object
   567 hashfn_profiler (Lisp_Object bt, struct Lisp_Hash_Table *h)
   568 {
   569   EMACS_UINT hash;
   570   if (VECTORP (bt))
   571     {
   572       hash = 0;
   573       ptrdiff_t l = ASIZE (bt);
   574       for (ptrdiff_t i = 0; i < l; i++)
   575         {
   576           Lisp_Object f = AREF (bt, i);
   577           EMACS_UINT hash1
   578             = (COMPILEDP (f) ? XHASH (AREF (f, COMPILED_BYTECODE))
   579                : (CONSP (f) && CONSP (XCDR (f)) && EQ (Qclosure, XCAR (f)))
   580                ? XHASH (XCDR (XCDR (f))) : XHASH (f));
   581           hash = sxhash_combine (hash, hash1);
   582         }
   583     }
   584   else
   585     hash = XHASH (bt);
   586   return make_ufixnum (SXHASH_REDUCE (hash));
   587 }
   588 
static void syms_of_profiler_for_pdumper (void);

/* Define the profiler's Lisp variables, symbols and subrs, and
   initialize its global state.  */
void
syms_of_profiler (void)
{
  DEFVAR_INT ("profiler-max-stack-depth", profiler_max_stack_depth,
              doc: /* Number of elements from the call-stack recorded in the log.  */);
  profiler_max_stack_depth = 16;
  DEFVAR_INT ("profiler-log-size", profiler_log_size,
              doc: /* Number of distinct call-stacks that can be recorded in a profiler log.
If the log gets full, some of the least-seen call-stacks will be evicted
to make room for new entries.  */);
  profiler_log_size = 10000;

  DEFSYM (Qprofiler_backtrace_equal, "profiler-backtrace-equal");
  DEFSYM (QDiscarded_Samples, "Discarded Samples");

  defsubr (&Sfunction_equal);

#ifdef PROFILER_CPU_SUPPORT
  profiler_cpu_running = NOT_RUNNING;
  cpu.log = Qnil;
  /* Protect the logs from garbage collection.  */
  staticpro (&cpu.log);
  defsubr (&Sprofiler_cpu_start);
  defsubr (&Sprofiler_cpu_stop);
  defsubr (&Sprofiler_cpu_running_p);
  defsubr (&Sprofiler_cpu_log);
#endif
  profiler_memory_running = false;
  memory.log = Qnil;
  staticpro (&memory.log);
  defsubr (&Sprofiler_memory_start);
  defsubr (&Sprofiler_memory_stop);
  defsubr (&Sprofiler_memory_running_p);
  defsubr (&Sprofiler_memory_log);

  pdumper_do_now_and_after_load (syms_of_profiler_for_pdumper);
}
   627 
   628 static void
   629 syms_of_profiler_for_pdumper (void)
   630 {
   631   if (dumped_with_pdumper_p ())
   632     {
   633 #ifdef PROFILER_CPU_SUPPORT
   634       cpu.log = Qnil;
   635 #endif
   636       memory.log = Qnil;
   637     }
   638   else
   639     {
   640 #ifdef PROFILER_CPU_SUPPORT
   641       eassert (NILP (cpu.log));
   642 #endif
   643       eassert (NILP (memory.log));
   644     }
   645 
   646 }

/* [<][>][^][v][top][bottom][index][help] */