1 /* vi: set sw=4 ts=4: */
3 * A tiny 'top' utility.
5 * This is written specifically for the linux /proc/<PID>/stat(m)
8 * This reads the PIDs of all processes and their status and shows
9 * the status of processes (first ones that fit to screen) at given
13 * - At startup this changes to /proc, all the reads are then
16 * (C) Eero Tamminen <oak at welho dot com>
18 * Rewritten by Vladimir Oleynik (C) 2002 <dzo@simtreas.ru>
20 * Sept 2008: Vineet Gupta <vineet.gupta@arc.com>
21 * Added Support for reporting SMP Information
22 * - CPU where Process was last seen running
23 * (to see effect of sched_setaffinity() etc)
24 * - CPU Time Split (idle/IO/wait etc) PER CPU
26 * Copyright (c) 1992 Branko Lankester
27 * Copyright (c) 1992 Roger Binns
28 * Copyright (C) 1994-1996 Charles L. Blake.
29 * Copyright (C) 1992-1998 Michael K. Johnson
31 * Licensed under GPLv2, see file LICENSE in this tarball for details.
/* Core data types for the 'top' scan.
 * NOTE(review): this snapshot is elided (original line numbers skip);
 * several struct members and closing braces are not visible here. */
37 typedef struct top_status_t {
39 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
41 	unsigned pcpu; /* delta of ticks */
47 #if ENABLE_FEATURE_TOP_SMP_PROCESS
/* Per-CPU (or aggregate) jiffy counters parsed from /proc/stat. */
52 typedef struct jiffy_counts_t {
53 	/* Linux 2.4.x has only first four */
54 	unsigned long long usr, nic, sys, idle;
55 	unsigned long long iowait, irq, softirq, steal;
56 	unsigned long long total;
57 	unsigned long long busy;
60 /* This structure stores some critical information from one frame to
61    the next. Used for finding deltas. */
62 typedef struct save_hist {
/* Comparator type used by the qsort-based process sorting. */
67 typedef int (*cmp_funcp)(top_status_t *P, top_status_t *Q);
/* Global state, packed into bb_common_bufsiz1 (the usual busybox trick to
 * avoid a separate .bss allocation). The #define aliases below let the rest
 * of the file refer to members as if they were plain globals.
 * NOTE(review): elided snapshot — the 'struct globals {' opener and several
 * members are not visible in this view. */
70 enum { SORT_DEPTH = 3 };
76 #if ENABLE_FEATURE_TOPMEM
80 #if ENABLE_FEATURE_TOP_SMP_CPU
81 smallint smp_cpu_info; /* one/many cpu info lines? */
83 #if ENABLE_FEATURE_USE_TERMIOS
84 struct termios initial_settings;
86 #if !ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
87 cmp_funcp sort_function[1];
89 cmp_funcp sort_function[SORT_DEPTH];
90 struct save_hist *prev_hist;
92 jiffy_counts_t cur_jif, prev_jif;
93 /* int hist_iterations; */
95 /* unsigned long total_vsz; */
97 #if ENABLE_FEATURE_TOP_SMP_CPU
98 /* Per CPU samples: current and last */
99 jiffy_counts_t *cpu_jif, *cpu_prev_jif;
103 }; //FIX_ALIASING; - large code growth
104 enum { LINE_BUF_SIZE = COMMON_BUFSIZE - offsetof(struct globals, line_buf) };
105 #define G (*(struct globals*)&bb_common_bufsiz1)
/* Compile-time size checks: a negative array size is a hard error. */
106 struct BUG_bad_size {
107 	char BUG_G_too_big[sizeof(G) <= COMMON_BUFSIZE ? 1 : -1];
108 	char BUG_line_buf_too_small[LINE_BUF_SIZE > 80 ? 1 : -1];
110 #define INIT_G() do { } while (0)
112 #define ntop (G.ntop )
113 #define sort_field (G.sort_field )
114 #define inverted (G.inverted )
115 #define smp_cpu_info (G.smp_cpu_info )
116 #define initial_settings (G.initial_settings )
117 #define sort_function (G.sort_function )
118 #define prev_hist (G.prev_hist )
119 #define prev_hist_count (G.prev_hist_count )
120 #define cur_jif (G.cur_jif )
121 #define prev_jif (G.prev_jif )
122 #define cpu_jif (G.cpu_jif )
123 #define cpu_prev_jif (G.cpu_prev_jif )
124 #define num_cpus (G.num_cpus )
125 #define total_pcpu (G.total_pcpu )
126 #define line_buf (G.line_buf )
/* getopt32 option bits; OPT_EOF is set internally, never parsed. */
133 	OPT_EOF = (1 << 4), /* pseudo: "we saw EOF in stdin" */
135 #define OPT_BATCH_MODE (option_mask32 & OPT_b)
138 #if ENABLE_FEATURE_USE_TERMIOS
/* Comparator: sort by PID, descending (Q before P). Only compiled when
 * interactive key handling exists, since it is reachable only via the
 * 'n' key binding. */
139 static int pid_sort(top_status_t *P, top_status_t *Q)
141 	/* Buggy wrt pids with high bit set */
142 	/* (linux pids are in [1..2^15-1]) */
143 	return (Q->pid - P->pid);
/* Comparator: sort by VSZ, descending. Uses explicit compares instead of
 * subtraction so that large unsigned vsz values cannot overflow an int. */
147 static int mem_sort(top_status_t *P, top_status_t *Q)
149 	/* We want to avoid unsigned->signed and truncation errors */
150 	if (Q->vsz < P->vsz) return -1;
151 	return Q->vsz != P->vsz; /* 0 if ==, 1 if > */
155 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
/* Comparator: sort by per-interval CPU tick delta (pcpu), descending. */
157 static int pcpu_sort(top_status_t *P, top_status_t *Q)
159 	/* Buggy wrt ticks with high bit set */
160 	/* Affects only processes for which ticks overflow */
161 	return (int)Q->pcpu - (int)P->pcpu;
/* Comparator: sort by cumulative CPU ticks, descending; overflow-safe
 * compare, same pattern as mem_sort above. */
164 static int time_sort(top_status_t *P, top_status_t *Q)
166 	/* We want to avoid unsigned->signed and truncation errors */
167 	if (Q->ticks < P->ticks) return -1;
168 	return Q->ticks != P->ticks; /* 0 if ==, 1 if > */
/* Multi-level comparator: tries each of the SORT_DEPTH sort_function[]
 * comparators in order until one yields a nonzero result (tie-breaking).
 * NOTE(review): loop body/return are elided from this snapshot. */
171 static int mult_lvl_cmp(void* a, void* b)
175 	for (i = 0; i < SORT_DEPTH; i++) {
176 		cmp_val = (*sort_function[i])(a, b);
/* Parse one "cpu ..." (or "cpuN ...") line from an already-open /proc/stat
 * stream into *p_jif, and derive the 'total' and 'busy' sums.
 * Returns the sscanf() item count (elided here); callers treat < 4 as
 * failure since Linux 2.4.x provides only the first four fields. */
183 static NOINLINE int read_cpu_jiffy(FILE *fp, jiffy_counts_t *p_jif)
185 #if !ENABLE_FEATURE_TOP_SMP_CPU
186 	static const char fmt[] = "cpu %llu %llu %llu %llu %llu %llu %llu %llu";
188 	static const char fmt[] = "cp%*s %llu %llu %llu %llu %llu %llu %llu %llu";
192 	if (!fgets(line_buf, LINE_BUF_SIZE, fp) || line_buf[0] != 'c' /* not "cpu" */)
194 	ret = sscanf(line_buf, fmt,
195 			&p_jif->usr, &p_jif->nic, &p_jif->sys, &p_jif->idle,
196 			&p_jif->iowait, &p_jif->irq, &p_jif->softirq,
199 		p_jif->total = p_jif->usr + p_jif->nic + p_jif->sys + p_jif->idle
200 			+ p_jif->iowait + p_jif->irq + p_jif->softirq + p_jif->steal;
201 		/* procps 2.x does not count iowait as busy time */
202 		p_jif->busy = p_jif->total - p_jif->idle - p_jif->iowait;
/* Refresh cur_jif (and, with SMP CPU display, the per-CPU cpu_jif[] samples)
 * from /proc/stat. On the first SMP invocation it counts CPUs by reading
 * "cpuN" lines until parsing fails, then allocates the per-CPU arrays.
 * Relies on the process having chdir()ed to /proc (see top_main).
 * NOTE(review): elided snapshot — fclose(), prev_jif save and parts of the
 * first-time/non-first-time branches are not visible here. */
208 static void get_jiffy_counts(void)
210 	FILE* fp = xfopen_for_read("stat");
212 	/* We need to parse cumulative counts even if SMP CPU display is on,
213 	 * they are used to calculate per process CPU% */
215 	if (read_cpu_jiffy(fp, &cur_jif) < 4)
216 		bb_error_msg_and_die("can't read /proc/stat");
218 #if !ENABLE_FEATURE_TOP_SMP_CPU
228 		/* First time here. How many CPUs?
229 		 * There will be at least 1 /proc/stat line with cpu%d
232 			cpu_jif = xrealloc_vector(cpu_jif, 1, num_cpus);
233 			if (read_cpu_jiffy(fp, &cpu_jif[num_cpus]) <= 4)
237 		if (num_cpus == 0) /* /proc/stat with only "cpu ..." line?! */
240 		cpu_prev_jif = xzalloc(sizeof(cpu_prev_jif[0]) * num_cpus);
242 		/* Otherwise the first per cpu display shows all 100% idles */
244 	} else { /* Non first time invocation */
248 		/* First switch the sample pointers: no need to copy */
250 		cpu_prev_jif = cpu_jif;
253 		/* Get the new samples */
254 		for (i = 0; i < num_cpus; i++)
255 			read_cpu_jiffy(fp, &cpu_jif[i]);
/* Compute per-process CPU deltas for this frame: for each entry in top[],
 * find its PID in the previous frame's history and set cur->pcpu to the
 * tick difference, accumulating total_pcpu. Then the new history replaces
 * prev_hist for the next frame.
 * NOTE(review): elided snapshot — variable declarations, the free() of the
 * old history and parts of the matching loop are not visible here. */
261 static void do_stats(void)
266 	struct save_hist *new_hist;
271 	new_hist = xmalloc(sizeof(new_hist[0]) * ntop);
273 	 * Make a pass through the data to get stats.
275 	/* hist_iterations = 0; */
277 	for (n = 0; n < ntop; n++) {
281 		 * Calculate time in cur process. Time is sum of user time
285 		new_hist[n].ticks = cur->ticks;
286 		new_hist[n].pid = pid;
288 		/* find matching entry from previous pass */
290 		/* do not start at index 0, continue at last used one
291 		 * (brought hist_iterations from ~14000 down to 172) */
293 		if (prev_hist_count) do {
294 			if (prev_hist[i].pid == pid) {
295 				cur->pcpu = cur->ticks - prev_hist[i].ticks;
296 				total_pcpu += cur->pcpu;
299 			i = (i+1) % prev_hist_count;
300 			/* hist_iterations++; */
301 		} while (i != last_i);
302 		/* total_vsz += cur->vsz; */
306 	 * Save cur frame's information.
309 	prev_hist = new_hist;
310 	prev_hist_count = ntop;
313 #endif /* FEATURE_TOP_CPU_USAGE_PERCENTAGE */
315 #if ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS && ENABLE_FEATURE_TOP_DECIMALS
316 /* formats 7 char string (8 with terminating NUL) */
/* Renders value/total as a fixed-width "NN.N% " style percentage into pbuf,
 * clamping at " 100% " when value >= total. Scales by 1000 to get one
 * decimal digit without floating point.
 * NOTE(review): elided — the return statement and some digit assignments
 * (pbuf[0], pbuf[3]) are not visible in this snapshot. */
317 static char *fmt_100percent_8(char pbuf[8], unsigned value, unsigned total)
320 	if (value >= total) { /* 100% ? */
321 		strcpy(pbuf, "  100% ");
324 	/* else generate " [N/space]N.N% " string */
325 	value = 1000 * value / total;
329 	pbuf[1] = t ? t + '0' : ' ';
330 	pbuf[2] = '0' + (value / 10);
332 	pbuf[4] = '0' + (value % 10);
340 #if ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS
/* Prints one CPU-usage summary line per CPU (or a single aggregate line),
 * computed as the delta between cur/prev jiffy samples over total_diff.
 * Decrements *lines_rem_p logic is partly elided; output is clipped to
 * scr_width. The CALC_STAT/SHOW_STAT/FMT macros switch between decimal
 * ("NN.N%") and integer ("NN%") rendering at compile time. */
341 static void display_cpus(int scr_width, char *scrbuf, int *lines_rem_p)
344 	 * xxx% = (cur_jif.xxx - prev_jif.xxx) / (cur_jif.total - prev_jif.total) * 100%
347 	jiffy_counts_t *p_jif, *p_prev_jif;
349 # if ENABLE_FEATURE_TOP_SMP_CPU
353 	/* using (unsigned) casts to make operations cheaper */
354 # define CALC_TOTAL_DIFF do { \
355 	total_diff = (unsigned)(p_jif->total - p_prev_jif->total); \
356 	if (total_diff == 0) total_diff = 1; \
359 # if ENABLE_FEATURE_TOP_DECIMALS
360 #  define CALC_STAT(xxx) char xxx[8]
361 #  define SHOW_STAT(xxx) fmt_100percent_8(xxx, (unsigned)(p_jif->xxx - p_prev_jif->xxx), total_diff)
364 #  define CALC_STAT(xxx) unsigned xxx = 100 * (unsigned)(p_jif->xxx - p_prev_jif->xxx) / total_diff
365 #  define SHOW_STAT(xxx) xxx
366 #  define FMT "%4u%% "
369 # if !ENABLE_FEATURE_TOP_SMP_CPU
373 	p_prev_jif = &prev_jif;
375 	/* Loop thru CPU(s) */
376 	n_cpu_lines = smp_cpu_info ? num_cpus : 1;
377 	if (n_cpu_lines > *lines_rem_p)
378 		n_cpu_lines = *lines_rem_p;
380 	for (i = 0; i < n_cpu_lines; i++) {
382 			p_prev_jif = &cpu_prev_jif[i];
386 		{ /* Need a block: CALC_STAT are declarations */
394 			/*CALC_STAT(steal);*/
396 			snprintf(scrbuf, scr_width,
397 				/* Barely fits in 79 chars when in "decimals" mode. */
398 # if ENABLE_FEATURE_TOP_SMP_CPU
399 				"CPU%s:"FMT"usr"FMT"sys"FMT"nic"FMT"idle"FMT"io"FMT"irq"FMT"sirq",
400 				(smp_cpu_info ? utoa(i) : ""),
402 				"CPU:"FMT"usr"FMT"sys"FMT"nic"FMT"idle"FMT"io"FMT"irq"FMT"sirq",
404 				SHOW_STAT(usr), SHOW_STAT(sys), SHOW_STAT(nic), SHOW_STAT(idle),
405 				SHOW_STAT(iowait), SHOW_STAT(irq), SHOW_STAT(softirq)
406 				/*, SHOW_STAT(steal) - what is this 'steal' thing? */
407 				/* I doubt anyone wants to know it */
417 #else  /* !ENABLE_FEATURE_TOP_CPU_GLOBAL_PERCENTS */
418 # define display_cpus(scr_width, scrbuf, lines_rem) ((void)0)
/* Prints the top-of-screen header: memory summary from /proc/meminfo,
 * optional per-CPU usage lines (display_cpus), and the load average line.
 * Tries the 2.4-era "Mem:" summary format first, then falls back to
 * parsing individual "MemFree:"/"Buffers:"/"Cached:" fields (2.6+).
 * Returns total memory (in kb) for later %MEM scaling; decrements
 * *lines_rem_p for each header line printed (partly elided here). */
421 static unsigned long display_header(int scr_width, int *lines_rem_p)
426 	unsigned long total, used, mfree, shared, buffers, cached;
428 	/* read memory info */
429 	fp = xfopen_for_read("meminfo");
432 	 * Old kernels (such as 2.4.x) had a nice summary of memory info that
433 	 * we could parse, however this is gone entirely in 2.6. Try parsing
434 	 * the old way first, and if that fails, parse each field manually.
436 	 * First, we read in the first line. Old kernels will have bogus
437 	 * strings we don't care about, whereas new kernels will start right
441 	if (fscanf(fp, "MemTotal: %lu %s\n", &total, buf) != 2) {
442 		fgets(buf, sizeof(buf), fp);    /* skip first line */
444 		fscanf(fp, "Mem: %lu %lu %lu %lu %lu %lu",
445 			&total, &used, &mfree, &shared, &buffers, &cached);
446 		/* convert to kilobytes */
455 		 * Revert to manual parsing, which incidentally already has the
456 		 * sizes in kilobytes. This should be safe for both 2.4 and
459 		fscanf(fp, "MemFree: %lu %s\n", &mfree, buf);
462 		 * MemShared: is no longer present in 2.6. Report this as 0,
463 		 * to maintain consistent behavior with normal procps.
465 		if (fscanf(fp, "MemShared: %lu %s\n", &shared, buf) != 2)
468 		fscanf(fp, "Buffers: %lu %s\n", &buffers, buf);
469 		fscanf(fp, "Cached: %lu %s\n", &cached, buf);
471 		used = total - mfree;
475 	/* output memory info */
476 	if (scr_width > (int)sizeof(scrbuf))
477 		scr_width = sizeof(scrbuf);
478 	snprintf(scrbuf, scr_width,
479 		"Mem: %luK used, %luK free, %luK shrd, %luK buff, %luK cached",
480 		used, mfree, shared, buffers, cached);
481 	/* go to top & clear to the end of screen */
482 	printf(OPT_BATCH_MODE ? "%s\n" : "\033[H\033[J%s\n", scrbuf);
485 	/* Display CPU time split as percentage of total time
486 	 * This displays either a cumulative line or one line per CPU
488 	display_cpus(scr_width, scrbuf, lines_rem_p);
490 	/* read load average as a string */
492 	open_read_close("loadavg", buf, sizeof(buf) - 1);
493 	buf[sizeof(buf) - 1] = '\n';
494 	*strchr(buf, '\n') = '\0';
495 	snprintf(scrbuf, scr_width, "Load average: %s", buf);
/* Renders the process table: header line(s) via display_header, a reverse-
 * video column header, then one line per process up to lines_rem.
 * %MEM and %CPU are computed with precomputed multiply+shift scale factors
 * (pmem_scale/pcpu_scale) instead of per-row divisions; the *_half terms
 * provide rounding. Output lines are clipped to scr_width.
 * NOTE(review): heavily elided — several declarations, the shift-reduction
 * loop bodies and the row-advance (s++) are not visible in this snapshot. */
502 static NOINLINE void display_process_list(int lines_rem, int scr_width)
505 		BITS_PER_INT = sizeof(int) * 8
510 	unsigned long total_memory = display_header(scr_width, &lines_rem); /* or use total_vsz? */
511 	/* xxx_shift and xxx_scale variables allow us to replace
512 	 * expensive divides with multiply and shift */
513 	unsigned pmem_shift, pmem_scale, pmem_half;
514 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
515 	unsigned tmp_unsigned;
516 	unsigned pcpu_shift, pcpu_scale, pcpu_half;
520 	/* what info of the processes is shown */
521 	printf(OPT_BATCH_MODE ? "%.*s" : "\033[7m%.*s\033[0m", scr_width,
522 		"  PID  PPID USER     STAT   VSZ %MEM"
523 		IF_FEATURE_TOP_SMP_PROCESS(" CPU")
524 		IF_FEATURE_TOP_CPU_USAGE_PERCENTAGE(" %CPU")
528 #if ENABLE_FEATURE_TOP_DECIMALS
529 # define UPSCALE 1000
530 # define CALC_STAT(name, val) div_t name = div((val), 10)
531 # define SHOW_STAT(name) name.quot, '0'+name.rem
532 # define FMT "%3u.%c"
535 # define CALC_STAT(name, val) unsigned name = (val)
536 # define SHOW_STAT(name) name
540 	 * MEM% = s->vsz/MemTotal
542 	pmem_shift = BITS_PER_INT-11;
543 	pmem_scale = UPSCALE*(1U<<(BITS_PER_INT-11)) / total_memory;
544 	/* s->vsz is in kb. we want (s->vsz * pmem_scale) to never overflow */
545 	while (pmem_scale >= 512) {
549 	pmem_half = (1U << pmem_shift) / (ENABLE_FEATURE_TOP_DECIMALS? 20 : 2);
550 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
551 	busy_jifs = cur_jif.busy - prev_jif.busy;
552 	/* This happens if there were lots of short-lived processes
553 	 * between two top updates (e.g. compilation) */
554 	if (total_pcpu < busy_jifs) total_pcpu = busy_jifs;
557 	 * CPU% = s->pcpu/sum(s->pcpu) * busy_cpu_ticks/total_cpu_ticks
558 	 * (pcpu is delta of sys+user time between samples)
560 	/* (cur_jif.xxx - prev_jif.xxx) and s->pcpu are
561 	 * in 0..~64000 range (HZ*update_interval).
562 	 * we assume that unsigned is at least 32-bit.
565 	pcpu_scale = UPSCALE*64 * (uint16_t)busy_jifs;
568 	while (pcpu_scale < (1U << (BITS_PER_INT-2))) {
572 	tmp_unsigned = (uint16_t)(cur_jif.total - prev_jif.total) * total_pcpu;
573 	if (tmp_unsigned != 0)
574 		pcpu_scale /= tmp_unsigned;
575 	/* we want (s->pcpu * pcpu_scale) to never overflow */
576 	while (pcpu_scale >= 1024) {
580 	pcpu_half = (1U << pcpu_shift) / (ENABLE_FEATURE_TOP_DECIMALS? 20 : 2);
581 	/* printf(" pmem_scale=%u pcpu_scale=%u ", pmem_scale, pcpu_scale); */
584 	/* Ok, all preliminary data is ready, go through the list */
585 	scr_width += 2; /* account for leading '\n' and trailing NUL */
586 	if (lines_rem > ntop)
589 	while (--lines_rem >= 0) {
591 		CALC_STAT(pmem, (s->vsz*pmem_scale + pmem_half) >> pmem_shift);
592 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
593 		CALC_STAT(pcpu, (s->pcpu*pcpu_scale + pcpu_half) >> pcpu_shift);
596 		if (s->vsz >= 100000)
597 			sprintf(vsz_str_buf, "%6ldm", s->vsz/1024);
599 			sprintf(vsz_str_buf, "%7ld", s->vsz);
600 		/* PID PPID USER STAT VSZ %MEM [%CPU] COMMAND */
601 		col = snprintf(line_buf, scr_width,
602 				"\n" "%5u%6u %-8.8s %s%s" FMT
603 				IF_FEATURE_TOP_SMP_PROCESS(" %3d")
604 				IF_FEATURE_TOP_CPU_USAGE_PERCENTAGE(FMT)
606 				s->pid, s->ppid, get_cached_username(s->uid),
607 				s->state, vsz_str_buf,
609 				IF_FEATURE_TOP_SMP_PROCESS(, s->last_seen_on_cpu)
610 				IF_FEATURE_TOP_CPU_USAGE_PERCENTAGE(, SHOW_STAT(pcpu))
612 		if ((int)(col + 1) < scr_width)
613 			read_cmdline(line_buf + col, scr_width - col, s->pid, s->comm);
614 		fputs(line_buf, stdout);
615 		/* printf(" %d/%d %lld/%lld", s->pcpu, total_pcpu,
616 			cur_jif.busy - prev_jif.busy, cur_jif.total - prev_jif.total); */
619 	/* printf(" %d", hist_iterations); */
620 	bb_putchar(OPT_BATCH_MODE ? '\n' : '\r');
/* Frees per-iteration caches before the next /proc scan.
 * NOTE(review): elided — body beyond clear_username_cache() not visible. */
628 static void clearmems(void)
630 	clear_username_cache();
636 #if ENABLE_FEATURE_USE_TERMIOS
/* Restores the terminal settings saved at startup; with CLEAN_UP also
 * frees allocations (elided). Called on exit and from sig_catcher. */
638 static void reset_term(void)
640 	tcsetattr_stdin_TCSANOW(&initial_settings);
641 	if (ENABLE_FEATURE_CLEAN_UP) {
643 # if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
/* Fatal-signal handler: restores terminal state before dying (body elided). */
649 static void sig_catcher(int sig UNUSED_PARAM)
654 #endif /* FEATURE_USE_TERMIOS */
/* Types for the 'topmem' (-m) mode, which sorts by memory-usage fields.
 * NOTE(review): struct members are elided in this snapshot. */
660 typedef unsigned long mem_t;
662 typedef struct topmem_status_t {
665 	/* vsz doesn't count /dev/xxx mappings except /dev/zero */
/* Number of sortable mem_t columns after 'vsz' (see topmem_sort). */
675 enum { NUM_SORT_FIELD = 7 };
/* top[] and topmem[] share one allocation; this alias reinterprets it. */
677 #define topmem ((topmem_status_t*)top)
679 #if ENABLE_FEATURE_TOPMEM
/* Comparator for topmem mode: selects the mem_t column indicated by the
 * global sort_field via byte-offset arithmetic from 'vsz', compares
 * descending, and flips the result when 'inverted' is set. */
681 static int topmem_sort(char *a, char *b)
686 	n = offsetof(topmem_status_t, vsz) + (sort_field * sizeof(mem_t));
687 	l = *(mem_t*)(a + n);
688 	r = *(mem_t*)(b + n);
693 	/* We want to avoid unsigned->signed and truncation errors */
694 	/* l>r: -1, l=r: 0, l<r: 1 */
695 	n = (l > r) ? -1 : (l != r);
696 	return inverted ? -n : n;
699 /* display header info (meminfo / loadavg) */
/* Reads /proc/meminfo once into a local buffer and extracts the fields in
 * match[] (each entry stores its own label length in the first byte, as a
 * "\xNN" prefix), then prints three summary lines clipped to scr_width.
 * Fields are expected in match[] order, so the scan pointer only moves
 * forward. NOTE(review): loop tail and lines_rem bookkeeping are elided. */
700 static void display_topmem_header(int scr_width, int *lines_rem_p)
703 		TOTAL = 0, MFREE, BUF, CACHE,
704 		SWAPTOTAL, SWAPFREE, DIRTY,
705 		MWRITE, ANON, MAP, SLAB,
708 	static const char match[NUM_FIELDS][12] = {
709 		"\x09" "MemTotal:",  // TOTAL
710 		"\x08" "MemFree:",   // MFREE
711 		"\x08" "Buffers:",   // BUF
712 		"\x07" "Cached:",    // CACHE
713 		"\x0a" "SwapTotal:", // SWAPTOTAL
714 		"\x09" "SwapFree:",  // SWAPFREE
715 		"\x06" "Dirty:",     // DIRTY
716 		"\x0a" "Writeback:", // MWRITE
717 		"\x0a" "AnonPages:", // ANON
718 		"\x07" "Mapped:",    // MAP
719 		"\x05" "Slab:",      // SLAB
721 	char meminfo_buf[4 * 1024];
722 	const char *Z[NUM_FIELDS];
726 	for (i = 0; i < NUM_FIELDS; i++)
729 	/* read memory info */
730 	sz = open_read_close("meminfo", meminfo_buf, sizeof(meminfo_buf) - 1);
732 		char *p = meminfo_buf;
733 		meminfo_buf[sz] = '\0';
734 		/* Note that fields always appear in the match[] order */
735 		for (i = 0; i < NUM_FIELDS; i++) {
736 			char *found = strstr(p, match[i] + 1);
738 				/* Cut "NNNN" out of "    NNNN kb" */
739 				char *s = skip_whitespace(found + match[i][0]);
740 				p = skip_non_whitespace(s);
747 	snprintf(line_buf, LINE_BUF_SIZE,
748 		"Mem total:%s anon:%s map:%s free:%s",
749 		Z[TOTAL], Z[ANON], Z[MAP], Z[MFREE]);
750 	printf(OPT_BATCH_MODE ? "%.*s\n" : "\033[H\033[J%.*s\n", scr_width, line_buf);
752 	snprintf(line_buf, LINE_BUF_SIZE,
753 		" slab:%s buf:%s cache:%s dirty:%s write:%s",
754 		Z[SLAB], Z[BUF], Z[CACHE], Z[DIRTY], Z[MWRITE]);
755 	printf("%.*s\n", scr_width, line_buf);
757 	snprintf(line_buf, LINE_BUF_SIZE,
758 		"Swap total:%s free:%s", // TODO: % used?
759 		Z[SWAPTOTAL], Z[SWAPFREE]);
760 	printf("%.*s\n", scr_width, line_buf);
/* Formats 'ul' into a 5-char human-readable field (k/m/g/t... suffixes via
 * smart_ulltoa5) followed by a space; writes exactly 6 chars, no NUL. */
765 static void ulltoa6_and_space(unsigned long long ul, char buf[6])
767 	/* see http://en.wikipedia.org/wiki/Tera */
768 	smart_ulltoa5(ul, buf, " mgtpezy");
/* Renders the topmem (-m) table: header, a reverse-video column header with
 * a '*' marking the current sort column (derived from sort_field), then one
 * fixed-width row per process. Each numeric column occupies 6 chars at
 * offsets 0*6..7*6 in line_buf; the command is appended after column 8*6
 * when the screen is wide enough. NOTE(review): the row-advance (s++) and
 * parts of the loop prologue are elided in this snapshot. */
772 static NOINLINE void display_topmem_process_list(int lines_rem, int scr_width)
774 #define HDR_STR " PID   VSZ VSZRW   RSS (SHR) DIRTY (SHR) STACK"
775 #define MIN_WIDTH sizeof(HDR_STR)
776 	const topmem_status_t *s = topmem;
778 	display_topmem_header(scr_width, &lines_rem);
779 	strcpy(line_buf, HDR_STR " COMMAND");
780 	line_buf[5 + sort_field * 6] = '*';
781 	printf(OPT_BATCH_MODE ? "%.*s" : "\e[7m%.*s\e[0m", scr_width, line_buf);
784 	if (lines_rem > ntop)
786 	while (--lines_rem >= 0) {
787 		/* PID VSZ VSZRW RSS (SHR) DIRTY (SHR) COMMAND */
788 		ulltoa6_and_space(s->pid     , &line_buf[0*6]);
789 		ulltoa6_and_space(s->vsz     , &line_buf[1*6]);
790 		ulltoa6_and_space(s->vszrw   , &line_buf[2*6]);
791 		ulltoa6_and_space(s->rss     , &line_buf[3*6]);
792 		ulltoa6_and_space(s->rss_sh  , &line_buf[4*6]);
793 		ulltoa6_and_space(s->dirty   , &line_buf[5*6]);
794 		ulltoa6_and_space(s->dirty_sh, &line_buf[6*6]);
795 		ulltoa6_and_space(s->stack   , &line_buf[7*6]);
796 		line_buf[8*6] = '\0';
797 		if (scr_width > (int)MIN_WIDTH) {
798 			read_cmdline(&line_buf[8*6], scr_width - MIN_WIDTH, s->pid, s->comm);
800 		printf("\n""%.*s", scr_width, line_buf);
803 	bb_putchar(OPT_BATCH_MODE ? '\n' : '\r');
/* Declarations only — presumably the !ENABLE_FEATURE_TOPMEM branch where
 * the real definitions above are compiled out (TODO confirm; the enclosing
 * #else is not visible in this elided snapshot). */
810 void display_topmem_process_list(int lines_rem, int scr_width);
811 int topmem_sort(char *a, char *b);
/* Entry point. Parses options (-d interval, -n iterations, -b batch,
 * optional -m topmem mode), chdir()s to /proc (elided but implied by the
 * relative xfopen_for_read("stat"/"meminfo") calls above), puts the terminal
 * into unbuffered no-echo mode, then loops: scan all /proc processes,
 * sort, display, and wait for either the interval timeout or an interactive
 * keypress (n/m/p/t/s/q etc. remap sort_function[] or toggle modes).
 * NOTE(review): heavily elided — many branches, the scan-mask handling and
 * loop exits are only partially visible; comments below are hedged. */
835 int top_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
836 int top_main(int argc UNUSED_PARAM, char **argv)
842 	char *str_interval, *str_iterations;
843 	unsigned scan_mask = TOP_MASK;
844 #if ENABLE_FEATURE_USE_TERMIOS
845 	struct termios new_settings;
846 	struct pollfd pfd[1];
850 	pfd[0].events = POLLIN;
855 	interval = 5; /* default update interval is 5 seconds */
856 	iterations = 0; /* infinite */
857 #if ENABLE_FEATURE_TOP_SMP_CPU
859 	/*smp_cpu_info = 0;*/  /* to start with show aggregate */
861 	cpu_prev_jif = &prev_jif;
864 	/* all args are options; -n NUM */
865 	opt_complementary = "-"; /* options can be specified w/o dash */
866 	col = getopt32(argv, "d:n:b"IF_FEATURE_TOPMEM("m"), &str_interval, &str_iterations);
867 #if ENABLE_FEATURE_TOPMEM
868 	if (col & OPT_m) /* -m (busybox specific) */
869 		scan_mask = TOPMEM_MASK;
872 		/* work around for "-d 1" -> "-d -1" done by getopt32
873 		 * (opt_complementary == "-" does this) */
874 		if (str_interval[0] == '-')
876 		/* Need to limit it to not overflow poll timeout */
877 		interval = xatou16(str_interval);
880 		if (str_iterations[0] == '-')
882 		iterations = xatou(str_iterations);
885 	/* change to /proc */
887 #if ENABLE_FEATURE_USE_TERMIOS
888 	tcgetattr(0, (void *) &initial_settings);
889 	memcpy(&new_settings, &initial_settings, sizeof(new_settings));
890 	/* unbuffered input, turn off echo */
891 	new_settings.c_lflag &= ~(ISIG | ICANON | ECHO | ECHONL);
893 	bb_signals(BB_FATAL_SIGS, sig_catcher);
894 	tcsetattr_stdin_TCSANOW(&new_settings);
897 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
898 	sort_function[0] = pcpu_sort;
899 	sort_function[1] = mem_sort;
900 	sort_function[2] = time_sort;
902 	sort_function[0] = mem_sort;
906 		procps_status_t *p = NULL;
908 		lines = 24; /* default */
910 #if ENABLE_FEATURE_USE_TERMIOS
911 		/* We output to stdout, we need size of stdout (not stdin)! */
912 		get_terminal_width_height(STDOUT_FILENO, &col, &lines);
913 		if (lines < 5 || col < 10) {
918 		if (col > LINE_BUF_SIZE-2) /* +2 bytes for '\n', NUL, */
919 			col = LINE_BUF_SIZE-2;
921 		/* read process IDs & status for all the processes */
922 		while ((p = procps_scan(p, scan_mask)) != NULL) {
924 #if ENABLE_FEATURE_TOPMEM
925 			if (scan_mask != TOPMEM_MASK)
929 				top = xrealloc_vector(top, 6, ntop++);
931 				top[n].ppid = p->ppid;
933 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
934 				top[n].ticks = p->stime + p->utime;
937 				strcpy(top[n].state, p->state);
938 				strcpy(top[n].comm, p->comm);
939 #if ENABLE_FEATURE_TOP_SMP_PROCESS
940 				top[n].last_seen_on_cpu = p->last_seen_on_cpu;
943 #if ENABLE_FEATURE_TOPMEM
945 				if (!(p->mapped_ro | p->mapped_rw))
946 					continue; /* kernel threads are ignored */
948 				/* No bug here - top and topmem are the same */
949 				top = xrealloc_vector(topmem, 6, ntop++);
950 				strcpy(topmem[n].comm, p->comm);
951 				topmem[n].pid      = p->pid;
952 				topmem[n].vsz      = p->mapped_rw + p->mapped_ro;
953 				topmem[n].vszrw    = p->mapped_rw;
954 				topmem[n].rss_sh   = p->shared_clean + p->shared_dirty;
955 				topmem[n].rss      = p->private_clean + p->private_dirty + topmem[n].rss_sh;
956 				topmem[n].dirty    = p->private_dirty + p->shared_dirty;
957 				topmem[n].dirty_sh = p->shared_dirty;
958 				topmem[n].stack    = p->stack;
961 		} /* end of "while we read /proc" */
963 			bb_error_msg("no process info in /proc");
967 		if (scan_mask != TOPMEM_MASK) {
968 #if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
969 			if (!prev_hist_count) {
976 			/* TODO: we don't need to sort all 10000 processes, we need to find top 24! */
977 			qsort(top, ntop, sizeof(top_status_t), (void*)mult_lvl_cmp);
979 			qsort(top, ntop, sizeof(top_status_t), (void*)(sort_function[0]));
982 #if ENABLE_FEATURE_TOPMEM
984 			qsort(topmem, ntop, sizeof(topmem_status_t), (void*)topmem_sort);
988 		if (OPT_BATCH_MODE) {
991 		if (scan_mask != TOPMEM_MASK)
992 			display_process_list(lines_rem, col);
993 #if ENABLE_FEATURE_TOPMEM
995 			display_topmem_process_list(lines_rem, col);
998 		if (iterations >= 0 && !--iterations)
1000 #if !ENABLE_FEATURE_USE_TERMIOS
1003 		if (option_mask32 & (OPT_b|OPT_EOF))
1004 			 /* batch mode, or EOF on stdin ("top </dev/null") */
1006 		else if (safe_poll(pfd, 1, interval * 1000) > 0) {
1007 			if (safe_read(STDIN_FILENO, &c, 1) != 1) { /* error/EOF? */
1008 				option_mask32 |= OPT_EOF;
1011 			if (c == initial_settings.c_cc[VINTR])
1013 			c |= 0x20; /* lowercase */
1017 				IF_FEATURE_TOPMEM(scan_mask = TOP_MASK;)
1018 				sort_function[0] = pid_sort;
1021 				IF_FEATURE_TOPMEM(scan_mask = TOP_MASK;)
1022 				sort_function[0] = mem_sort;
1023 # if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
1024 				sort_function[1] = pcpu_sort;
1025 				sort_function[2] = time_sort;
1028 # if ENABLE_FEATURE_SHOW_THREADS
1030 					IF_FEATURE_TOPMEM(&& scan_mask != TOPMEM_MASK)
1032 					scan_mask ^= PSSCAN_TASKS;
1035 # if ENABLE_FEATURE_TOP_CPU_USAGE_PERCENTAGE
1037 				IF_FEATURE_TOPMEM(scan_mask = TOP_MASK;)
1038 				sort_function[0] = pcpu_sort;
1039 				sort_function[1] = mem_sort;
1040 				sort_function[2] = time_sort;
1043 				IF_FEATURE_TOPMEM(scan_mask = TOP_MASK;)
1044 				sort_function[0] = time_sort;
1045 				sort_function[1] = mem_sort;
1046 				sort_function[2] = pcpu_sort;
1048 # if ENABLE_FEATURE_TOPMEM
1050 				scan_mask = TOPMEM_MASK;
1053 				prev_hist_count = 0;
1054 				sort_field = (sort_field + 1) % NUM_SORT_FIELD;
1059 # if ENABLE_FEATURE_TOP_SMP_CPU
1060 			/* procps-2.0.18 uses 'C', 3.2.7 uses '1' */
1061 			if (c == 'c' || c == '1') {
1062 				/* User wants to toggle per cpu <> aggregate */
1067 					cpu_prev_jif = &prev_jif;
1069 					/* Prepare for xrealloc() */
1070 					cpu_jif = cpu_prev_jif = NULL;
1073 				smp_cpu_info = !smp_cpu_info;
1079 #endif /* FEATURE_USE_TERMIOS */
1080 	} /* end of "while (1)" */
1083 #if ENABLE_FEATURE_USE_TERMIOS
1086 	return EXIT_SUCCESS;