1 /* vi: set sw=4 ts=4: */
3 * Per-processor statistics, based on sysstat version 9.1.2 by Sebastien Godard
5 * Copyright (C) 2010 Marek Polacek <mmpolacek@gmail.com>
7 * Licensed under GPLv2, see file License in this tarball for details.
10 //applet:IF_MPSTAT(APPLET(mpstat, _BB_DIR_BIN, _BB_SUID_DROP))
12 //kbuild:lib-$(CONFIG_MPSTAT) += mpstat.o
14 //config:config MPSTAT
15 //config: bool "mpstat"
18 //config: Per-processor statistics
21 #include <sys/utsname.h> /* struct utsname */
23 //#define debug(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__)
24 #define debug(fmt, ...) ((void)0)
26 /* Size of /proc/interrupts line, CPU data excluded */
27 #define INTERRUPTS_LINE 64
28 /* Maximum number of interrupts */
30 #define NR_IRQCPU_PREALLOC 3
31 #define MAX_IRQNAME_LEN 16
32 #define MAX_PF_NAME 512
33 /* sysstat 9.0.6 uses width 8, but newer code which also prints /proc/softirqs
34 * data needs more: "interrupts" in /proc/softirqs have longer names,
35 * most are up to 8 chars, one (BLOCK_IOPOLL) is even longer.
36 * We are printing headers in the " IRQNAME/s" form, experimentally
37 * anything smaller than 10 chars looks ugly for /proc/softirqs stats.
39 #define INTRATE_SCRWIDTH 10
40 #define INTRATE_SCRWIDTH_STR "10"
43 #define SYSFS_DEVCPU "/sys/devices/system/cpu"
44 #define PROCFS_STAT "/proc/stat"
45 #define PROCFS_INTERRUPTS "/proc/interrupts"
46 #define PROCFS_SOFTIRQS "/proc/softirqs"
47 #define PROCFS_UPTIME "/proc/uptime"
51 typedef unsigned long long data_t;
52 typedef long long idata_t;
54 #define DATA_MAX ULLONG_MAX
56 typedef unsigned long data_t;
59 #define DATA_MAX ULONG_MAX
65 char irq_name[MAX_IRQNAME_LEN];
85 /* Globals. Sort by size and access frequency. */
89 unsigned cpu_nr; /* Number of CPUs */
90 unsigned irqcpu_nr; /* Number of interrupts per CPU */
91 unsigned softirqcpu_nr; /* Number of soft interrupts per CPU */
94 unsigned cpu_bitmap_len;
96 // 9.0.6 does not do it. Try "mpstat -A 1 2" - headers are repeated!
97 //smallint header_done;
98 //smallint avg_header_done;
99 unsigned char *cpu_bitmap; /* Bit 0: global, bit 1: 1st proc... */
100 data_t global_uptime[3];
101 data_t per_cpu_uptime[3];
102 struct stats_cpu *st_cpu[3];
103 struct stats_irq *st_irq[3];
104 struct stats_irqcpu *st_irqcpu[3];
105 struct stats_irqcpu *st_softirqcpu[3];
106 struct tm timestamp[3];
108 #define G (*ptr_to_globals)
109 #define INIT_G() do { \
110 SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
113 /* The selected interrupts statistics (bits in G.options) */
/*
 * Does str start with "cpu"?
 * Returns 1 if so, 0 otherwise.
 * Fix vs. original: the old branch-free form ((str[0]-'c')|(str[1]-'p')|...)
 * unconditionally read str[1] and str[2], which can read past the
 * terminating NUL of a string shorter than 3 chars. Short-circuiting
 * comparisons stop at the first mismatch (including '\0').
 */
static int starts_with_cpu(const char *str)
{
	return str[0] == 'c' && str[1] == 'p' && str[2] == 'u';
}
129 static ALWAYS_INLINE int display_opt(int opt)
131 return (opt & G.options);
134 #if DATA_MAX > 0xffffffff
136 * Handle overflow conditions properly for counters which can have
137 * less bits than data_t, depending on the kernel version.
139 /* Surprisingly, on 32bit inlining is a size win */
140 static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
142 data_t v = curr - prev;
144 if ((idata_t)v < 0 /* curr < prev - counter overflow? */
145 && prev <= 0xffffffff /* kernel uses 32bit value for the counter? */
147 /* Add 33th bit set to 1 to curr, compensating for the overflow */
148 /* double shift defeats "warning: left shift count >= width of type" */
149 v += ((data_t)1 << 16) << 16;
/* 32-bit data_t variant (other side of the DATA_MAX #if above).
 * NOTE(review): body is not visible in this excerpt - presumably a plain
 * "return curr - prev;" since no overflow compensation is possible when
 * data_t itself is 32-bit; confirm against upstream busybox mpstat.c. */
154 static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
160 static double percent_value(data_t prev, data_t curr, data_t itv)
162 return ((double)overflow_safe_sub(prev, curr)) / itv * 100;
165 static double hz_value(data_t prev, data_t curr, data_t itv)
167 //bb_error_msg("curr:%lld prev:%lld G.hz:%u", curr, prev, G.hz);
168 return ((double)overflow_safe_sub(prev, curr)) / itv * G.hz;
171 static ALWAYS_INLINE data_t jiffies_diff(data_t old, data_t new)
173 data_t diff = new - old;
174 return (diff == 0) ? 1 : diff;
177 static int is_cpu_in_bitmap(unsigned cpu)
179 return G.cpu_bitmap[cpu >> 3] & (1 << (cpu & 7));
/*
 * Print per-CPU interrupt-rate tables (" IRQNAME/s" columns). Used for
 * both /proc/interrupts (-I CPU) and /proc/softirqs (-I SCPU) data.
 * per_cpu_stats: ring of snapshots; 'prev'/'current' index the two
 * snapshots being compared, 'prev_str'/'current_str' their timestamps.
 * NOTE(review): this excerpt has gaps (several original lines are missing,
 * e.g. loop bodies and closing braces) - confirm against upstream before
 * further edits.
 */
182 static void write_irqcpu_stats(struct stats_irqcpu *per_cpu_stats[],
185 int prev, int current,
186 const char *prev_str, const char *current_str)
190 struct stats_irqcpu *p0, *q0;
192 /* Check if number of IRQs has changed */
193 if (G.interval != 0) {
/* Compare IRQ names slot-by-slot between the two snapshots */
194 for (j = 0; j <= total_irqs; j++) {
195 p0 = &per_cpu_stats[current][j];
196 if (p0->irq_name[0] != '\0') {
197 q0 = &per_cpu_stats[prev][j];
198 if (strcmp(p0->irq_name, q0->irq_name) != 0) {
199 /* Strings are different */
/* Header row: timestamp column then "CPU", then one header per IRQ */
207 printf("\n%-11s CPU", prev_str);
209 /* A bit complex code to "buy back" space if one header is too wide.
210 * Here's how it looks like. BLOCK_IOPOLL eats too much space,
211 * and latter headers use smaller width to compensate:
212 * ...BLOCK/s BLOCK_IOPOLL/s TASKLET/s SCHED/s HRTIMER/s RCU/s
213 * ... 2.32 0.00 0.01 17.58 0.14 141.96
215 int expected_len = 0;
217 for (j = 0; j < total_irqs; j++) {
218 p0 = &per_cpu_stats[current][j];
219 if (p0->irq_name[0] != '\0') {
/* Shrink this header by however much previous headers overflowed */
220 int n = (INTRATE_SCRWIDTH-3) - (printed_len - expected_len);
221 printed_len += printf(" %*s/s", n > 0 ? n : 0, skip_whitespace(p0->irq_name));
222 expected_len += INTRATE_SCRWIDTH;
/* One output row per CPU; index 'cpu' 1..G.cpu_nr prints as CPU cpu-1 */
228 for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
229 /* Check if we want stats about this CPU */
230 if (!is_cpu_in_bitmap(cpu) && G.p_option) {
234 printf("%-11s %4u", current_str, cpu - 1);
236 for (j = 0; j < total_irqs; j++) {
237 /* IRQ field set only for proc 0 */
238 p0 = &per_cpu_stats[current][j];
241 * An empty string for irq name means that
242 * interrupt is no longer used.
244 if (p0->irq_name[0] != '\0') {
246 q0 = &per_cpu_stats[prev][offset];
249 * If we want stats for the time since boot
250 * we have p0->irq != q0->irq.
/* Re-match this IRQ in the previous snapshot at nearby offsets if the
 * name at the same slot no longer matches */
252 if (strcmp(p0->irq_name, q0->irq_name) != 0
257 q0 = &per_cpu_stats[prev][offset];
259 if (strcmp(p0->irq_name, q0->irq_name) != 0
260 && (j + 1 < total_irqs)
263 q0 = &per_cpu_stats[prev][offset];
267 if (strcmp(p0->irq_name, q0->irq_name) == 0
270 struct stats_irqcpu *p, *q;
271 p = &per_cpu_stats[current][(cpu - 1) * total_irqs + j];
272 q = &per_cpu_stats[prev][(cpu - 1) * total_irqs + offset];
/* Rate = count delta / interval * HZ, fixed column width */
273 printf("%"INTRATE_SCRWIDTH_STR".2f",
274 (double)(p->interrupt - q->interrupt) / itv * G.hz);
284 static data_t get_per_cpu_interval(const struct stats_cpu *scc,
285 const struct stats_cpu *scp)
287 return ((scc->cpu_user + scc->cpu_nice +
288 scc->cpu_system + scc->cpu_iowait +
289 scc->cpu_idle + scc->cpu_steal +
290 scc->cpu_irq + scc->cpu_softirq) -
291 (scp->cpu_user + scp->cpu_nice +
292 scp->cpu_system + scp->cpu_iowait +
293 scp->cpu_idle + scp->cpu_steal +
294 scp->cpu_irq + scp->cpu_softirq));
/*
 * Print one row of CPU utilization percentages in the column order
 * %usr %nice %sys %iowait %irq %soft %steal %guest %idle.
 * Guest time is subtracted from user time because the kernel's user
 * counter already includes guest (see NB in get_cpu_statistics).
 * NOTE(review): the third parameter line (the 'itv' interval argument
 * used below) and the closing brace are missing from this excerpt -
 * confirm the signature against upstream.
 */
297 static void print_stats_cpu_struct(const struct stats_cpu *p,
298 const struct stats_cpu *c,
301 printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
302 percent_value(p->cpu_user - p->cpu_guest,
303 /**/ c->cpu_user - c->cpu_guest, itv),
304 percent_value(p->cpu_nice , c->cpu_nice , itv),
305 percent_value(p->cpu_system , c->cpu_system , itv),
306 percent_value(p->cpu_iowait , c->cpu_iowait , itv),
307 percent_value(p->cpu_irq , c->cpu_irq , itv),
308 percent_value(p->cpu_softirq, c->cpu_softirq, itv),
309 percent_value(p->cpu_steal , c->cpu_steal , itv),
310 percent_value(p->cpu_guest , c->cpu_guest , itv),
311 percent_value(p->cpu_idle , c->cpu_idle , itv)
/*
 * Core report printer: emits the selected tables (D_CPU utilization,
 * D_IRQ_SUM intr/s, D_IRQ_CPU, D_SOFTIRQS) comparing snapshot 'prev'
 * against 'current', labeling rows with 'prev_str'/'current_str'.
 * NOTE(review): several original lines are missing from this excerpt
 * (declarations, closing braces, 'continue's) - confirm against upstream.
 */
315 static void write_stats_core(int prev, int current,
316 const char *prev_str, const char *current_str)
318 struct stats_cpu *scc, *scp;
319 data_t itv, global_itv;
322 /* Compute time interval */
323 itv = global_itv = jiffies_diff(G.global_uptime[prev], G.global_uptime[current]);
325 /* Reduce interval to one CPU */
327 itv = jiffies_diff(G.per_cpu_uptime[prev], G.per_cpu_uptime[current]);
329 /* Print CPU stats */
330 if (display_opt(D_CPU)) {
332 ///* This is done exactly once */
333 //if (!G.header_done) {
334 printf("\n%-11s CPU %%usr %%nice %%sys %%iowait %%irq %%soft %%steal %%guest %%idle\n",
337 // G.header_done = 1;
/* Row 0 is the "all" aggregate; rows 1..G.cpu_nr are individual CPUs */
340 for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
343 /* Print stats about this particular CPU? */
344 if (!is_cpu_in_bitmap(cpu))
347 scc = &G.st_cpu[current][cpu];
348 scp = &G.st_cpu[prev][cpu];
349 per_cpu_itv = global_itv;
351 printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
355 * If the CPU is offline, then it isn't in /proc/stat,
356 * so all values are 0.
357 * NB: Guest time is already included in user time.
359 if ((scc->cpu_user | scc->cpu_nice | scc->cpu_system |
360 scc->cpu_iowait | scc->cpu_idle | scc->cpu_steal |
361 scc->cpu_irq | scc->cpu_softirq) == 0
364 * Set current struct fields to values from prev.
365 * iteration. Then their values won't jump from
366 * zero, when the CPU comes back online.
372 /* Compute interval again for current proc */
373 per_cpu_itv = get_per_cpu_interval(scc, scp);
374 if (per_cpu_itv == 0) {
376 * If the CPU is tickless then there is no change in CPU values
377 * but the sum of values is not zero.
381 printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
382 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, idle);
386 print_stats_cpu_struct(scp, scc, per_cpu_itv);
390 /* Print total number of IRQs per CPU */
391 if (display_opt(D_IRQ_SUM)) {
393 ///* Print average header, this is done exactly once */
394 //if (!G.avg_header_done) {
395 printf("\n%-11s CPU intr/s\n", prev_str);
396 // G.avg_header_done = 1;
399 for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
402 /* Print stats about this CPU? */
403 if (!is_cpu_in_bitmap(cpu))
407 printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
409 scc = &G.st_cpu[current][cpu];
410 scp = &G.st_cpu[prev][cpu];
411 /* Compute interval again for current proc */
412 per_cpu_itv = get_per_cpu_interval(scc, scp);
413 if (per_cpu_itv == 0) {
414 printf(" %9.2f\n", 0.0);
418 //bb_error_msg("G.st_irq[%u][%u].irq_nr:%lld - G.st_irq[%u][%u].irq_nr:%lld",
419 // current, cpu, G.st_irq[prev][cpu].irq_nr, prev, cpu, G.st_irq[current][cpu].irq_nr);
420 printf(" %9.2f\n", hz_value(G.st_irq[prev][cpu].irq_nr, G.st_irq[current][cpu].irq_nr, per_cpu_itv));
/* Detailed per-IRQ tables for /proc/interrupts and /proc/softirqs */
424 if (display_opt(D_IRQ_CPU)) {
425 write_irqcpu_stats(G.st_irqcpu, G.irqcpu_nr,
428 prev_str, current_str
432 if (display_opt(D_SOFTIRQS)) {
433 write_irqcpu_stats(G.st_softirqcpu, G.softirqcpu_nr,
436 prev_str, current_str
442 * Print the statistics
/*
 * Format both snapshot timestamps as "%X" (locale time) and delegate to
 * write_stats_core, comparing the other slot (!current) against 'current'.
 * NOTE(review): the prev_time/curr_time buffer declarations are missing
 * from this excerpt.
 */
444 static void write_stats(int current)
449 strftime(prev_time, sizeof(prev_time), "%X", &G.timestamp[!current]);
450 strftime(curr_time, sizeof(curr_time), "%X", &G.timestamp[current]);
452 write_stats_core(!current, current, prev_time, curr_time);
/*
 * Print the "Average:" summary: compare snapshot slot 2 (the stats saved
 * at the start of the run, see main_loop) against 'current'.
 */
static void write_stats_avg(int current)
{
	write_stats_core(2, current, "Average:", "Average:");
}
461 * Read CPU statistics
/*
 * Parse /proc/stat "cpu"/"cpuN" lines into cpu[0] (aggregate) and
 * cpu[N+1] (per-CPU). Accumulates the summed jiffies into *up (global)
 * and, for CPU 0, into *up0 (single-CPU uptime).
 * NOTE(review): several lines (declarations, fclose, closing braces)
 * are missing from this excerpt.
 */
463 static void get_cpu_statistics(struct stats_cpu *cpu, data_t *up, data_t *up0)
468 fp = xfopen_for_read(PROCFS_STAT);
470 while (fgets(buf, sizeof(buf), fp)) {
473 struct stats_cpu *cp;
475 if (!starts_with_cpu(buf))
476 continue; /* not "cpu" */
478 cp = cpu; /* for "cpu " case */
/* "cpuN" line: route into the per-CPU slot (N+1) */
482 || sscanf(buf + 3, "%u ", &cpu_number) != 1
483 || cpu_number >= G.cpu_nr
487 cp = &cpu[cpu_number + 1];
490 /* Read the counters, save them */
491 /* Not all fields have to be present */
492 memset(cp, 0, sizeof(*cp));
494 " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
495 " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
496 " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u",
497 &cp->cpu_user, &cp->cpu_nice, &cp->cpu_system,
498 &cp->cpu_idle, &cp->cpu_iowait, &cp->cpu_irq,
499 &cp->cpu_softirq, &cp->cpu_steal, &cp->cpu_guest
502 * Compute uptime in jiffies (1/HZ), it'll be the sum of
503 * individual CPU's uptimes.
504 * NB: We have to omit cpu_guest, because cpu_user includes it.
506 sum = cp->cpu_user + cp->cpu_nice + cp->cpu_system +
507 cp->cpu_idle + cp->cpu_iowait + cp->cpu_irq +
508 cp->cpu_softirq + cp->cpu_steal;
515 if (cpu_number == 0 && *up0 != 0) {
516 /* Compute uptime of single CPU */
525 * Read IRQs from /proc/stat
/*
 * Scan /proc/stat for the "intr " line and store the total IRQ count
 * since boot into irq->irq_nr.
 * NOTE(review): fp NULL-check, break/fclose and closing brace are missing
 * from this excerpt (fopen_for_read can return NULL).
 */
527 static void get_irqs_from_stat(struct stats_irq *irq)
532 fp = fopen_for_read(PROCFS_STAT)
536 while (fgets(buf, sizeof(buf), fp)) {
537 //bb_error_msg("/proc/stat:'%s'", buf);
538 if (strncmp(buf, "intr ", 5) == 0) {
539 /* Read total number of IRQs since system boot */
540 sscanf(buf + 5, "%"FMT_DATA"u", &irq->irq_nr);
548 * Read stats from /proc/interrupts or /proc/softirqs
/*
 * Parse a /proc/interrupts-style file: read the "CPU0 CPU1 ..." header to
 * learn which CPUs are online (cpu_index[]), then read each "IRQNAME: n n
 * ..." line into per_cpu_stats[current], and for numeric IRQs accumulate
 * counts into G.st_irq[current][cpu].irq_nr.
 * NOTE(review): this excerpt has gaps (declarations, loop-ends, fclose) -
 * confirm against upstream before editing.
 */
550 static void get_irqs_from_interrupts(const char *fname,
551 struct stats_irqcpu *per_cpu_stats[],
552 int irqs_per_cpu, int current)
555 struct stats_irq *irq_i;
556 struct stats_irqcpu *ic;
/* VLA indexed by online-CPU position -> CPU number from the header */
561 int cpu_index[G.cpu_nr];
565 // Otherwise reading of /proc/softirqs
566 // was resetting counts to 0 after we painstakingly collected them from
567 // /proc/interrupts. Which resulted in:
568 // 01:32:47 PM CPU intr/s
569 // 01:32:47 PM all 591.47
570 // 01:32:47 PM 0 0.00 <= ???
571 // 01:32:47 PM 1 0.00 <= ???
572 // for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
573 // G.st_irq[current][cpu].irq_nr = 0;
574 // //bb_error_msg("G.st_irq[%u][%u].irq_nr=0", current, cpu);
577 fp = fopen_for_read(fname);
/* Line buffer sized for the fixed prefix plus 16 chars per CPU column */
581 buflen = INTERRUPTS_LINE + 16 * G.cpu_nr;
582 buf = xmalloc(buflen);
584 /* Parse header and determine, which CPUs are online */
586 while (fgets(buf, buflen, fp)) {
589 while ((cp = strstr(next, "CPU")) != NULL
592 cpu = strtoul(cp + 3, &next, 10);
593 cpu_index[iindex++] = cpu;
595 if (iindex) /* We found header */
600 while (fgets(buf, buflen, fp)
601 && irq < irqs_per_cpu
607 /* Skip over "IRQNAME:" */
608 cp = strchr(buf, ':');
613 ic = &per_cpu_stats[current][irq];
/* Truncate overly long IRQ names to fit the fixed-size field */
615 if (len >= sizeof(ic->irq_name)) {
616 len = sizeof(ic->irq_name) - 1;
618 safe_strncpy(ic->irq_name, buf, len + 1);
619 //bb_error_msg("%s: irq%d:'%s' buf:'%s'", fname, irq, ic->irq_name, buf);
622 for (cpu = 0; cpu < iindex; cpu++) {
624 ic = &per_cpu_stats[current][cpu_index[cpu] * irqs_per_cpu + irq];
625 irq_i = &G.st_irq[current][cpu_index[cpu] + 1];
626 ic->interrupt = strtoul(cp, &next, 10);
627 /* Count only numerical IRQs */
628 if (isdigit(last_char)) {
629 irq_i->irq_nr += ic->interrupt;
630 //bb_error_msg("G.st_irq[%u][%u].irq_nr + %u = %lld",
631 // current, cpu_index[cpu] + 1, ic->interrupt, irq_i->irq_nr);
/* Blank out any remaining slots when the IRQ count shrank */
640 while (irq < irqs_per_cpu) {
641 /* Number of interrupts per CPU has changed */
642 ic = &per_cpu_stats[current][irq];
643 ic->irq_name[0] = '\0'; /* False interrupt */
/*
 * Read system uptime from /proc/uptime ("SECS.CENTISECS ...") and store
 * it in *uptime, converted to jiffies via G.hz.
 * NOTE(review): fp NULL-check, fclose and closing braces are missing
 * from this excerpt.
 */
648 static void get_uptime(data_t *uptime)
651 char buf[sizeof(long)*3 * 2 + 4]; /* enough for long.long */
652 unsigned long uptime_sec, decimal;
654 fp = fopen_for_read(PROCFS_UPTIME);
657 if (fgets(buf, sizeof(buf), fp)) {
658 if (sscanf(buf, "%lu.%lu", &uptime_sec, &decimal) == 2) {
/* seconds * HZ plus the fractional (centisecond) part scaled to jiffies */
659 *uptime = (data_t)uptime_sec * G.hz + decimal * G.hz / 100;
/*
 * Fill *tm with the current local broken-down time.
 * NOTE(review): the 'timer' declaration and the time(&timer) call are
 * missing from this excerpt.
 */
666 static void get_localtime(struct tm *tm)
670 localtime_r(&timer, tm);
/*
 * SIGALRM handler: re-installs itself, since with signal() the disposition
 * may be reset to SIG_DFL on delivery (SysV semantics).
 * NOTE(review): the alarm(G.interval) re-arm presumably follows but is
 * missing from this excerpt - confirm against upstream.
 */
673 static void alarm_handler(int sig UNUSED_PARAM)
675 signal(SIGALRM, alarm_handler);
/*
 * Main sampling loop: take an initial snapshot into slot 0; if no interval
 * was given, report stats since boot (slot 1 zeroed as the baseline);
 * otherwise save slot 0 into slot 2 (for the final "Average:" report),
 * then repeatedly sleep until SIGALRM, snapshot into the alternating
 * 'current' slot, and print.
 * NOTE(review): many lines (loop framing, pause(), count handling,
 * closing braces) are missing from this excerpt.
 */
679 static void main_loop(void)
/* Initial snapshot into slot 0 */
686 G.per_cpu_uptime[0] = 0;
687 get_uptime(&G.per_cpu_uptime[0]);
690 get_cpu_statistics(G.st_cpu[0], &G.global_uptime[0], &G.per_cpu_uptime[0]);
692 if (display_opt(D_IRQ_SUM))
693 get_irqs_from_stat(G.st_irq[0]);
695 if (display_opt(D_IRQ_SUM | D_IRQ_CPU))
696 get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
699 if (display_opt(D_SOFTIRQS))
700 get_irqs_from_interrupts(PROCFS_SOFTIRQS, G.st_softirqcpu,
703 if (G.interval == 0) {
704 /* Display since boot time */
/* Zeroed slot 1 acts as the "at boot" baseline */
706 G.timestamp[1] = G.timestamp[0];
707 memset(G.st_cpu[1], 0, sizeof(G.st_cpu[1][0]) * cpus);
708 memset(G.st_irq[1], 0, sizeof(G.st_irq[1][0]) * cpus);
709 memset(G.st_irqcpu[1], 0, sizeof(G.st_irqcpu[1][0]) * cpus * G.irqcpu_nr);
710 memset(G.st_softirqcpu[1], 0, sizeof(G.st_softirqcpu[1][0]) * cpus * G.softirqcpu_nr);
718 /* Set a handler for SIGALRM */
721 /* Save the stats we already have. We need them to compute the average */
722 G.timestamp[2] = G.timestamp[0];
723 G.global_uptime[2] = G.global_uptime[0];
724 G.per_cpu_uptime[2] = G.per_cpu_uptime[0];
726 memcpy(G.st_cpu[2], G.st_cpu[0], sizeof(G.st_cpu[0][0]) * cpus);
727 memcpy(G.st_irq[2], G.st_irq[0], sizeof(G.st_irq[0][0]) * cpus);
728 memcpy(G.st_irqcpu[2], G.st_irqcpu[0], sizeof(G.st_irqcpu[0][0]) * cpus * G.irqcpu_nr);
729 if (display_opt(D_SOFTIRQS)) {
730 memcpy(G.st_softirqcpu[2], G.st_softirqcpu[0],
731 sizeof(G.st_softirqcpu[0][0]) * cpus * G.softirqcpu_nr);
736 /* Suspend until a signal is received */
739 /* Set structures to 0 to distinguish off/online CPUs */
740 memset(&G.st_cpu[current][/*cpu:*/ 1], 0, sizeof(G.st_cpu[0][0]) * G.cpu_nr);
742 get_localtime(&G.timestamp[current]);
/* Take the next snapshot into the 'current' slot */
746 G.per_cpu_uptime[current] = 0;
747 get_uptime(&G.per_cpu_uptime[current]);
749 get_cpu_statistics(G.st_cpu[current], &G.global_uptime[current], &G.per_cpu_uptime[current]);
751 if (display_opt(D_IRQ_SUM))
752 get_irqs_from_stat(G.st_irq[current]);
754 if (display_opt(D_IRQ_SUM | D_IRQ_CPU)) {
/* Reset per-CPU accumulators before get_irqs_from_interrupts re-adds them */
756 for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
757 G.st_irq[current][cpu].irq_nr = 0;
759 /* accumulates .irq_nr */
760 get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
761 G.irqcpu_nr, current);
764 if (display_opt(D_SOFTIRQS))
765 get_irqs_from_interrupts(PROCFS_SOFTIRQS,
767 G.softirqcpu_nr, current);
769 write_stats(current);
779 /* Print average statistics */
780 write_stats_avg(current);
785 /* Get number of clock ticks per sec */
786 static ALWAYS_INLINE unsigned get_hz(void)
788 return sysconf(_SC_CLK_TCK);
/*
 * Allocate the three snapshot slots (prev/current/average-baseline) for
 * CPU, IRQ and per-CPU IRQ stats, plus the CPU selection bitmap.
 * 'cpus' is G.cpu_nr + 1 (slot 0 holds the "all CPUs" aggregate).
 * NOTE(review): the 'int i;' declaration, opening and closing braces are
 * missing from this excerpt.
 */
791 static void alloc_struct(int cpus)
794 for (i = 0; i < 3; i++) {
795 G.st_cpu[i] = xzalloc(sizeof(G.st_cpu[i][0]) * cpus);
796 G.st_irq[i] = xzalloc(sizeof(G.st_irq[i][0]) * cpus);
797 G.st_irqcpu[i] = xzalloc(sizeof(G.st_irqcpu[i][0]) * cpus * G.irqcpu_nr);
798 G.st_softirqcpu[i] = xzalloc(sizeof(G.st_softirqcpu[i][0]) * cpus * G.softirqcpu_nr);
/* One bit per CPU plus the global bit; rounded up to whole bytes */
800 G.cpu_bitmap_len = (cpus >> 3) + 1;
801 G.cpu_bitmap = xzalloc(G.cpu_bitmap_len);
/*
 * Print the one-time report banner: "sysname release (hostname) date
 * _machine_ (N CPU)".
 * NOTE(review): the 'struct utsname uts' declaration, uname() call and
 * cur_date buffer are missing from this excerpt.
 */
804 static void print_header(struct tm *t)
809 /* Get system name, release number and hostname */
812 strftime(cur_date, sizeof(cur_date), "%x", t);
814 printf("%s %s (%s)\t%s\t_%s_\t(%u CPU)\n",
815 uts.sysname, uts.release, uts.nodename, cur_date, uts.machine, G.cpu_nr);
819 * Get number of processors in /proc/stat
820 * Return value '0' means one CPU and non-SMP kernel.
821 * Otherwise N means N processor(s) and SMP kernel.
/*
 * Scans the leading "cpu"/"cpuN" lines of /proc/stat, tracking the
 * highest CPU index seen.
 * NOTE(review): declarations, fclose and the return statement are missing
 * from this excerpt.
 */
823 static int get_cpu_nr(void)
829 fp = xfopen_for_read(PROCFS_STAT);
830 while (fgets(line, sizeof(line), fp)) {
831 if (!starts_with_cpu(line)) {
833 break; /* we are past "cpuN..." lines */
836 if (line[3] != ' ') { /* "cpuN" */
838 if (sscanf(line + 3, "%u", &num_proc) == 1
839 && num_proc > proc_nr
851 * Get number of interrupts available per processor
/*
 * Counts "NAME:"-prefixed lines in file 'f' (/proc/interrupts or
 * /proc/softirqs), bounded by 'max_irqs'. Returns 0 if the file is
 * absent.
 * NOTE(review): counter declarations, the loop body, free/fclose and the
 * return are missing from this excerpt.
 */
853 static int get_irqcpu_nr(const char *f, int max_irqs)
860 fp = fopen_for_read(f);
861 if (!fp) /* No interrupts file */
/* Line buffer sized for the fixed prefix plus 16 chars per CPU column */
864 linelen = INTERRUPTS_LINE + 16 * G.cpu_nr;
865 line = xmalloc(linelen);
868 while (fgets(line, linelen, fp)
/* A line counts as an IRQ if a ':' appears within the first 16 chars */
871 int p = strcspn(line, ":");
872 if ((p > 0) && (p < 16))
882 //usage:#define mpstat_trivial_usage
883 //usage: "[-A] [-I SUM|CPU|ALL|SCPU] [-u] [-P num|ALL] [INTERVAL [COUNT]]"
884 //usage:#define mpstat_full_usage "\n\n"
885 //usage: "Per-processor statistics\n"
886 //usage: "\nOptions:"
887 //usage: "\n -A Same as -I ALL -u -P ALL"
888 //usage: "\n -I SUM|CPU|ALL|SCPU Report interrupt statistics"
889 //usage: "\n -P num|ALL Processor to monitor"
890 //usage: "\n -u Report CPU utilization"
/*
 * Entry point: probe the system (CPU count, HZ, IRQ counts), allocate the
 * snapshot structures, parse -A/-I/-P/-u and INTERVAL/COUNT arguments
 * into G.options / G.cpu_bitmap / G.interval / G.count, print the banner,
 * run main_loop(), and optionally free everything.
 * NOTE(review): numerous lines (declarations, argv advancement, defaults,
 * main_loop() call, closing braces) are missing from this excerpt.
 */
892 int mpstat_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
893 int mpstat_main(int UNUSED_PARAM argc, char **argv)
899 OPT_ALL = 1 << 0, /* -A */
900 OPT_INTS = 1 << 1, /* -I */
901 OPT_SETCPU = 1 << 2, /* -P */
902 OPT_UTIL = 1 << 3, /* -u */
905 /* Don't buffer data if redirected to a pipe */
906 setbuf(stdout, NULL);
912 /* Get number of processors */
913 G.cpu_nr = get_cpu_nr();
915 /* Get number of clock ticks per sec */
918 /* Calculate number of interrupts per processor */
919 G.irqcpu_nr = get_irqcpu_nr(PROCFS_INTERRUPTS, NR_IRQS) + NR_IRQCPU_PREALLOC;
921 /* Calculate number of soft interrupts per processor */
922 G.softirqcpu_nr = get_irqcpu_nr(PROCFS_SOFTIRQS, NR_IRQS) + NR_IRQCPU_PREALLOC;
924 /* Allocate space for structures. + 1 for global structure. */
925 alloc_struct(G.cpu_nr + 1);
927 /* Parse and process arguments */
928 opt = getopt32(argv, "AI:P:u", &opt_irq_fmt, &opt_set_cpu);
933 G.interval = xatoi_positive(*argv);
937 /* Get count value */
940 G.count = xatoi_positive(*argv);
/* -A: everything on, all CPUs selected */
950 G.options |= D_CPU + D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS;
951 /* Select every CPU */
952 memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
955 if (opt & OPT_INTS) {
/* Map -I argument CPU/SUM/SCPU/ALL to display-option bits */
956 static const char v[] = {
957 D_IRQ_CPU, D_IRQ_SUM, D_SOFTIRQS,
958 D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS
960 i = index_in_strings("CPU\0SUM\0SCPU\0ALL\0", opt_irq_fmt);
966 if ((opt & OPT_UTIL) /* -u? */
967 || G.options == 0 /* nothing? (use default then) */
972 if (opt & OPT_SETCPU) {
/* -P takes a comma-separated list of CPU numbers, or "ALL" */
976 for (t = strtok(opt_set_cpu, ","); t; t = strtok(NULL, ",")) {
977 if (strcmp(t, "ALL") == 0) {
978 /* Select every CPU */
979 memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
982 unsigned n = xatoi_positive(t);
984 bb_error_msg_and_die("not that many processors");
986 G.cpu_bitmap[n >> 3] |= 1 << (n & 7);
992 /* Display global stats */
996 get_localtime(&G.timestamp[0]);
999 print_header(&G.timestamp[0]);
1004 if (ENABLE_FEATURE_CLEAN_UP) {
1006 for (i = 0; i < 3; i++) {
1009 free(G.st_irqcpu[i]);
1010 free(G.st_softirqcpu[i]);
1016 return EXIT_SUCCESS;