root/src/haiku.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. list_system_processes
  2. system_process_attributes
  3. buf_next
  4. buf_prev
  5. getavg
  6. sample_sys_load
  7. getloadavg

     1 /* Haiku subroutines that are general to the Haiku operating system.
     2    Copyright (C) 2021-2023 Free Software Foundation, Inc.
     3 
     4 This file is part of GNU Emacs.
     5 
     6 GNU Emacs is free software: you can redistribute it and/or modify
     7 it under the terms of the GNU General Public License as published by
     8 the Free Software Foundation, either version 3 of the License, or (at
     9 your option) any later version.
    10 
    11 GNU Emacs is distributed in the hope that it will be useful,
    12 but WITHOUT ANY WARRANTY; without even the implied warranty of
    13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    14 GNU General Public License for more details.
    15 
    16 You should have received a copy of the GNU General Public License
    17 along with GNU Emacs.  If not, see <https://www.gnu.org/licenses/>.  */
    18 
    19 #include <config.h>
    20 
    21 #include "lisp.h"
    22 #include "process.h"
    23 #include "coding.h"
    24 
    25 #include <kernel/OS.h>
    26 
    27 #include <pwd.h>
    28 #include <stdlib.h>
    29 
    30 Lisp_Object
    31 list_system_processes (void)
    32 {
    33   team_info info;
    34   int32 cookie = 0;
    35   Lisp_Object lval = Qnil;
    36 
    37   while (get_next_team_info (&cookie, &info) == B_OK)
    38     lval = Fcons (make_fixnum (info.team), lval);
    39 
    40   return lval;
    41 }
    42 
    43 Lisp_Object
    44 system_process_attributes (Lisp_Object pid)
    45 {
    46   CHECK_FIXNUM (pid);
    47 
    48   team_info info;
    49   Lisp_Object lval = Qnil;
    50   thread_info inf;
    51   area_info area;
    52   team_id id = (team_id) XFIXNUM (pid);
    53   struct passwd *g;
    54   size_t mem = 0;
    55 
    56   if (get_team_info (id, &info) != B_OK)
    57     return Qnil;
    58 
    59   bigtime_t everything = 0, vsample = 0;
    60   bigtime_t cpu_eaten = 0, esample = 0;
    61 
    62   lval = Fcons (Fcons (Qeuid, make_fixnum (info.uid)), lval);
    63   lval = Fcons (Fcons (Qegid, make_fixnum (info.gid)), lval);
    64   lval = Fcons (Fcons (Qthcount, make_fixnum (info.thread_count)), lval);
    65   lval = Fcons (Fcons (Qcomm, build_string_from_utf8 (info.args)), lval);
    66 
    67   g = getpwuid (info.uid);
    68 
    69   if (g && g->pw_name)
    70     lval = Fcons (Fcons (Quser, build_string (g->pw_name)), lval);
    71 
    72   /* FIXME: Calculating this makes Emacs show up as using 100% CPU! */
    73 
    74   for (int32 team_cookie = 0;
    75        get_next_team_info (&team_cookie, &info) == B_OK;)
    76     for (int32 thread_cookie = 0;
    77          get_next_thread_info (info.team, &thread_cookie, &inf) == B_OK;)
    78       {
    79         if (inf.team == id && strncmp (inf.name, "idle thread ", 12))
    80           cpu_eaten += inf.user_time + inf.kernel_time;
    81         everything += inf.user_time + inf.kernel_time;
    82       }
    83 
    84   sleep (0.05);
    85 
    86   for (int32 team_cookie = 0;
    87        get_next_team_info (&team_cookie, &info) == B_OK;)
    88     for (int32 thread_cookie = 0;
    89          get_next_thread_info (info.team, &thread_cookie, &inf) == B_OK;)
    90       {
    91         if (inf.team == id && strncmp (inf.name, "idle thread ", 12))
    92           esample += inf.user_time + inf.kernel_time;
    93         vsample += inf.user_time + inf.kernel_time;
    94       }
    95 
    96   cpu_eaten = esample - cpu_eaten;
    97   everything = vsample - everything;
    98 
    99   if (everything)
   100     lval = Fcons (Fcons (Qpcpu, make_float (((double) (cpu_eaten) /
   101                                              (double) (everything)) * 100)),
   102                   lval);
   103   else
   104     lval = Fcons (Fcons (Qpcpu, make_float (0.0)), lval);
   105 
   106   for (ssize_t area_cookie = 0;
   107        get_next_area_info (id, &area_cookie, &area) == B_OK;)
   108     mem += area.ram_size;
   109 
   110   system_info sinfo;
   111   get_system_info (&sinfo);
   112   int64 max = (int64) sinfo.max_pages * B_PAGE_SIZE;
   113 
   114   lval = Fcons (Fcons (Qpmem, make_float (((double) mem /
   115                                            (double) max) * 100)),
   116                 lval);
   117   lval = Fcons (Fcons (Qrss, make_fixnum (mem / 1024)), lval);
   118 
   119   return lval;
   120 }
   121 
   122 
/* Borrowed from w32 implementation.  */

/* One snapshot of cumulative CPU time usage across the whole
   system, taken at SAMPLE_TIME.  */
struct load_sample
{
  time_t sample_time;	/* Wall-clock time at which the sample was taken.  */
  bigtime_t idle;	/* Cumulative time consumed by idle threads.  */
  bigtime_t kernel;	/* Cumulative kernel time of all other threads.  */
  bigtime_t user;	/* Cumulative user time of all other threads.  */
};

/* We maintain 1-sec samples for the last 16 minutes in a circular buffer.  */
static struct load_sample samples[16*60];
/* Indices of the oldest and newest samples in the buffer, or -1 when
   no sample has been stored yet.  */
static int first_idx = -1, last_idx = -1;
/* Capacity of the circular buffer.  */
static int max_idx = ARRAYELTS (samples);
/* Number of processors, fetched lazily in getloadavg; 0 until
   initialized.  */
static unsigned num_of_processors = 0;
   138 
   139 static int
   140 buf_next (int from)
   141 {
   142   int next_idx = from + 1;
   143 
   144   if (next_idx >= max_idx)
   145     next_idx = 0;
   146 
   147   return next_idx;
   148 }
   149 
   150 static int
   151 buf_prev (int from)
   152 {
   153   int prev_idx = from - 1;
   154 
   155   if (prev_idx < 0)
   156     prev_idx = max_idx - 1;
   157 
   158   return prev_idx;
   159 }
   160 
/* Return an estimate of the system load over the last 1, 5 or 15
   minutes, for WHICH equal to 0, 1 or 2 respectively.  Return -1.0
   if the sample buffer does not yet span enough time to compute the
   requested average.  */
static double
getavg (int which)
{
  double retval = -1.0;
  double tdiff;
  int idx;
  /* Length of the interval the average should cover, in seconds.  */
  double span = (which == 0 ? 1.0 : (which == 1 ? 5.0 : 15.0)) * 60;
  time_t now = samples[last_idx].sample_time;

  if (first_idx != last_idx)
    {
      /* Walk backwards from the newest sample until one at least
	 SPAN seconds older is found, then compute the average from
	 the CPU time used between that sample and the newest one.  */
      for (idx = buf_prev (last_idx); ; idx = buf_prev (idx))
	{
	  /* The epsilon fuzz compensates for floating-point rounding
	     in the time comparison, as in the w32 original.  */
	  tdiff = difftime (now, samples[idx].sample_time);
	  if (tdiff >= span - 2 * DBL_EPSILON * now)
	    {
	      long double sys =
		(samples[last_idx].kernel + samples[last_idx].user) -
		(samples[idx].kernel + samples[idx].user);
	      long double idl = samples[last_idx].idle - samples[idx].idle;

	      /* NOTE(review): this yields the *idle* fraction of the
		 interval scaled by the processor count, not the busy
		 fraction; confirm against the w32 implementation this
		 was borrowed from before relying on the value.  */
	      retval = (idl / (sys + idl)) * num_of_processors;
	      break;
	    }
	  if (idx == first_idx)
	    break;
	}
    }

  return retval;
}
   192 
   193 static void
   194 sample_sys_load (bigtime_t *idle, bigtime_t *system, bigtime_t *user)
   195 {
   196   bigtime_t i = 0, s = 0, u = 0;
   197   team_info info;
   198   thread_info inf;
   199 
   200   for (int32 team_cookie = 0;
   201        get_next_team_info (&team_cookie, &info) == B_OK;)
   202     for (int32 thread_cookie = 0;
   203          get_next_thread_info (info.team, &thread_cookie, &inf) == B_OK;)
   204       {
   205         if (!strncmp (inf.name, "idle thread ", 12))
   206           i += inf.user_time + inf.kernel_time;
   207         else
   208           s += inf.kernel_time, u += inf.user_time;
   209       }
   210 
   211   *idle = i;
   212   *system = s;
   213   *user = u;
   214 }
   215 
/* Store up to NELEM load averages (over the last 1, 5 and 15
   minutes) into LOADAVG and return the number of elements stored,
   which is always at least 1.  Each call also records a fresh load
   sample into the circular buffer if the previous one is at least a
   second old.  */
int
getloadavg (double loadavg[], int nelem)
{
  int elem;
  bigtime_t idle, kernel, user;
  time_t now = time (NULL);

  /* Lazily fetch the processor count on the first call.
     NUM_OF_PROCESSORS is unsigned, so this test is really == 0.  */
  if (num_of_processors <= 0)
    {
      system_info i;
      if (get_system_info (&i) == B_OK)
        num_of_processors = i.cpu_count;
    }

  /* If system time jumped back for some reason, delete all samples
     whose time is later than the current wall-clock time.  This
     prevents load average figures from becoming frozen for prolonged
     periods of time, when system time is reset backwards.  */
  if (last_idx >= 0)
    {
      while (difftime (now, samples[last_idx].sample_time) < -1.0)
        {
          if (last_idx == first_idx)
            {
              first_idx = last_idx = -1;
              break;
            }
          last_idx = buf_prev (last_idx);
        }
    }

  /* Store another sample.  We ignore samples that are less than 1 sec
     apart.  */
  if (last_idx < 0
      || (difftime (now, samples[last_idx].sample_time)
          >= 1.0 - 2 * DBL_EPSILON * now))
    {
      sample_sys_load (&idle, &kernel, &user);
      /* buf_next (-1) yields 0, so this also handles the very first
	 sample.  */
      last_idx = buf_next (last_idx);
      samples[last_idx].sample_time = now;
      samples[last_idx].idle = idle;
      samples[last_idx].kernel = kernel;
      samples[last_idx].user = user;
      /* If the buffer has more that 15 min worth of samples, discard
         the old ones.  */
      if (first_idx == -1)
        first_idx = last_idx;
      while (first_idx != last_idx
             && (difftime (now, samples[first_idx].sample_time)
                 >= 15.0 * 60 + 2 * DBL_EPSILON * now))
        first_idx = buf_next (first_idx);
    }

  /* Fill LOADAVG until getavg reports it lacks enough samples.  */
  for (elem = 0; elem < nelem; elem++)
    {
      double avg = getavg (elem);

      if (avg < 0)
        break;
      loadavg[elem] = avg;
    }

  /* Always return at least one element, otherwise load-average
     returns nil, and Lisp programs might decide we cannot measure
     system load.  For example, jit-lock-stealth-load's defcustom
     might decide that feature is "unsupported".  */
  if (elem == 0)
    loadavg[elem++] = 0.09;     /* < display-time-load-average-threshold */

  return elem;
}

/* [<][>][^][v][top][bottom][index][help] */