source: MondoRescue/branches/3.3/mindi-busybox/procps/mpstat.c@ 3621

Last change on this file since 3621 was 3621, checked in by Bruno Cornec, 7 years ago

New 3.3 branch for incorporation of the latest busybox 1.25. Changing the minor version to handle potential incompatibilities.

  • Property svn:eol-style set to native
File size: 23.7 KB
/* vi: set sw=4 ts=4: */
/*
 * Per-processor statistics, based on sysstat version 9.1.2 by Sebastien Godard
 *
 * Copyright (C) 2010 Marek Polacek <mmpolacek@gmail.com>
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */

//applet:IF_MPSTAT(APPLET(mpstat, BB_DIR_BIN, BB_SUID_DROP))

//kbuild:lib-$(CONFIG_MPSTAT) += mpstat.o

//config:config MPSTAT
//config: bool "mpstat"
//config: default y
//config: help
//config: Per-processor statistics

#include "libbb.h"
#include <sys/utsname.h> /* struct utsname */

//#define debug(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__)
#define debug(fmt, ...) ((void)0)

/* Size of /proc/interrupts line, CPU data excluded */
#define INTERRUPTS_LINE 64
/* Maximum number of interrupts */
#define NR_IRQS 256
#define NR_IRQCPU_PREALLOC 3
#define MAX_IRQNAME_LEN 16
#define MAX_PF_NAME 512
/* sysstat 9.0.6 uses width 8, but newer code which also prints /proc/softirqs
 * data needs more: "interrupts" in /proc/softirqs have longer names,
 * most are up to 8 chars, one (BLOCK_IOPOLL) is even longer.
 * We are printing headers in the " IRQNAME/s" form, experimentally
 * anything smaller than 10 chars looks ugly for /proc/softirqs stats.
 */
#define INTRATE_SCRWIDTH 10
#define INTRATE_SCRWIDTH_STR "10"
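/* Illustration (not from sysstat): with width 10 a /proc/softirqs header cell
 * such as "   SCHED/s" or "   TIMER/s" still lines up under its %10.2f value,
 * while a long name like BLOCK_IOPOLL/s triggers the "buy back" logic in
 * write_irqcpu_stats() below.
 */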

/* System files */
#define PROCFS_STAT "/proc/stat"
#define PROCFS_INTERRUPTS "/proc/interrupts"
#define PROCFS_SOFTIRQS "/proc/softirqs"
#define PROCFS_UPTIME "/proc/uptime"


#if 1
typedef unsigned long long data_t;
typedef long long idata_t;
#define FMT_DATA "ll"
#define DATA_MAX ULLONG_MAX
#else
typedef unsigned long data_t;
typedef long idata_t;
#define FMT_DATA "l"
#define DATA_MAX ULONG_MAX
#endif


struct stats_irqcpu {
    unsigned interrupts;
    char irq_name[MAX_IRQNAME_LEN];
};

struct stats_cpu {
    data_t cpu_user;
    data_t cpu_nice;
    data_t cpu_system;
    data_t cpu_idle;
    data_t cpu_iowait;
    data_t cpu_steal;
    data_t cpu_irq;
    data_t cpu_softirq;
    data_t cpu_guest;
};

struct stats_irq {
    data_t irq_nr;
};


/* Globals. Sort by size and access frequency. */
struct globals {
    int interval;
    int count;
    unsigned cpu_nr; /* Number of CPUs */
    unsigned irqcpu_nr; /* Number of interrupts per CPU */
    unsigned softirqcpu_nr; /* Number of soft interrupts per CPU */
    unsigned options;
    unsigned hz;
    unsigned cpu_bitmap_len;
    smallint p_option;
    // 9.0.6 does not do it. Try "mpstat -A 1 2" - headers are repeated!
    //smallint header_done;
    //smallint avg_header_done;
    unsigned char *cpu_bitmap; /* Bit 0: global, bit 1: 1st proc... */
    data_t global_uptime[3];
    data_t per_cpu_uptime[3];
    struct stats_cpu *st_cpu[3];
    struct stats_irq *st_irq[3];
    struct stats_irqcpu *st_irqcpu[3];
    struct stats_irqcpu *st_softirqcpu[3];
    struct tm timestamp[3];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
    SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)

/* The selected interrupts statistics (bits in G.options) */
enum {
    D_CPU = 1 << 0,
    D_IRQ_SUM = 1 << 1,
    D_IRQ_CPU = 1 << 2,
    D_SOFTIRQS = 1 << 3,
};
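/* How options map to these bits (see mpstat_main below): -u sets D_CPU;
 * -I CPU, SUM and SCPU set D_IRQ_CPU, D_IRQ_SUM and D_SOFTIRQS respectively,
 * -I ALL sets all three of them, and -A sets all four bits.
 */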


/* Is option on? */
static ALWAYS_INLINE int display_opt(int opt)
{
    return (opt & G.options);
}

#if DATA_MAX > 0xffffffff
/*
 * Handle overflow conditions properly for counters which can have
 * fewer bits than data_t, depending on the kernel version.
 */
/* Surprisingly, on 32bit inlining is a size win */
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
    data_t v = curr - prev;

    if ((idata_t)v < 0 /* curr < prev - counter overflow? */
     && prev <= 0xffffffff /* kernel uses 32bit value for the counter? */
    ) {
        /* Add a 33rd bit set to 1 to curr, compensating for the overflow */
        /* double shift defeats "warning: left shift count >= width of type" */
        v += ((data_t)1 << 16) << 16;
    }
    return v;
}
#else
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
    return curr - prev;
}
#endif
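/* Worked example of the 32-bit compensation above: if the kernel keeps the
 * counter in 32 bits and it wraps, prev = 0xFFFFFFF0 and curr = 0x10 give
 * v = 0xFFFFFFFF00000020 (negative when viewed as idata_t); adding 1 << 32
 * yields 0x20, the true increment.
 */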

static double percent_value(data_t prev, data_t curr, data_t itv)
{
    return ((double)overflow_safe_sub(prev, curr)) / itv * 100;
}
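/* E.g. percent_value(100, 150, 200) = (150 - 100) / 200 * 100 = 25 (%).
 * hz_value() below is analogous but scales by G.hz to get events per second.
 */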

static double hz_value(data_t prev, data_t curr, data_t itv)
{
    //bb_error_msg("curr:%lld prev:%lld G.hz:%u", curr, prev, G.hz);
    return ((double)overflow_safe_sub(prev, curr)) / itv * G.hz;
}

static ALWAYS_INLINE data_t jiffies_diff(data_t old, data_t new)
{
    data_t diff = new - old;
    return (diff == 0) ? 1 : diff;
}

static int is_cpu_in_bitmap(unsigned cpu)
{
    return G.cpu_bitmap[cpu >> 3] & (1 << (cpu & 7));
}
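/* Bit 0 is the "all" line; physical CPU N (0-based) maps to bit N + 1
 * (see the -P handling in mpstat_main). E.g. CPU 8 -> bit 9, i.e.
 * cpu_bitmap[1] & 0x02.
 */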

static void write_irqcpu_stats(struct stats_irqcpu *per_cpu_stats[],
        int total_irqs,
        data_t itv,
        int prev, int current,
        const char *prev_str, const char *current_str)
{
    int j;
    int offset, cpu;
    struct stats_irqcpu *p0, *q0;

    /* Check if number of IRQs has changed */
    if (G.interval != 0) {
        for (j = 0; j <= total_irqs; j++) {
            p0 = &per_cpu_stats[current][j];
            if (p0->irq_name[0] != '\0') {
                q0 = &per_cpu_stats[prev][j];
                if (strcmp(p0->irq_name, q0->irq_name) != 0) {
                    /* Strings are different */
                    break;
                }
            }
        }
    }

    /* Print header */
    printf("\n%-11s CPU", prev_str);
    {
        /* A bit complex code to "buy back" space if one header is too wide.
         * Here is how it looks. BLOCK_IOPOLL eats too much space,
         * and later headers use a smaller width to compensate:
         * ...BLOCK/s BLOCK_IOPOLL/s TASKLET/s SCHED/s HRTIMER/s RCU/s
         * ... 2.32 0.00 0.01 17.58 0.14 141.96
         */
        int expected_len = 0;
        int printed_len = 0;
        for (j = 0; j < total_irqs; j++) {
            p0 = &per_cpu_stats[current][j];
            if (p0->irq_name[0] != '\0') {
                int n = (INTRATE_SCRWIDTH-3) - (printed_len - expected_len);
                printed_len += printf(" %*s/s", n > 0 ? n : 0, skip_whitespace(p0->irq_name));
                expected_len += INTRATE_SCRWIDTH;
            }
        }
    }
    bb_putchar('\n');

    for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
        /* Check if we want stats about this CPU */
        if (!is_cpu_in_bitmap(cpu) && G.p_option) {
            continue;
        }

        printf("%-11s %4u", current_str, cpu - 1);

        for (j = 0; j < total_irqs; j++) {
            /* IRQ field set only for proc 0 */
            p0 = &per_cpu_stats[current][j];

            /*
             * An empty string for irq name means that
             * interrupt is no longer used.
             */
            if (p0->irq_name[0] != '\0') {
                offset = j;
                q0 = &per_cpu_stats[prev][offset];

                /*
                 * If we want stats for the time since boot
                 * we have p0->irq != q0->irq.
                 */
                if (strcmp(p0->irq_name, q0->irq_name) != 0
                 && G.interval != 0
                ) {
                    if (j) {
                        offset = j - 1;
                        q0 = &per_cpu_stats[prev][offset];
                    }
                    if (strcmp(p0->irq_name, q0->irq_name) != 0
                     && (j + 1 < total_irqs)
                    ) {
                        offset = j + 1;
                        q0 = &per_cpu_stats[prev][offset];
                    }
                }

                if (strcmp(p0->irq_name, q0->irq_name) == 0
                 || G.interval == 0
                ) {
                    struct stats_irqcpu *p, *q;
                    p = &per_cpu_stats[current][(cpu - 1) * total_irqs + j];
                    q = &per_cpu_stats[prev][(cpu - 1) * total_irqs + offset];
                    printf("%"INTRATE_SCRWIDTH_STR".2f",
                        (double)(p->interrupts - q->interrupts) / itv * G.hz);
                } else {
                    printf(" N/A");
                }
            }
        }
        bb_putchar('\n');
    }
}

static data_t get_per_cpu_interval(const struct stats_cpu *scc,
        const struct stats_cpu *scp)
{
    return ((scc->cpu_user + scc->cpu_nice +
        scc->cpu_system + scc->cpu_iowait +
        scc->cpu_idle + scc->cpu_steal +
        scc->cpu_irq + scc->cpu_softirq) -
        (scp->cpu_user + scp->cpu_nice +
        scp->cpu_system + scp->cpu_iowait +
        scp->cpu_idle + scp->cpu_steal +
        scp->cpu_irq + scp->cpu_softirq));
}
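/* Guest time is deliberately left out of the sum above: the kernel already
 * folds it into cpu_user, so adding cpu_guest would count it twice (the same
 * reason it is subtracted from %usr in print_stats_cpu_struct() below).
 */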

static void print_stats_cpu_struct(const struct stats_cpu *p,
        const struct stats_cpu *c,
        data_t itv)
{
    printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
        percent_value(p->cpu_user - p->cpu_guest,
        /**/ c->cpu_user - c->cpu_guest, itv),
        percent_value(p->cpu_nice , c->cpu_nice , itv),
        percent_value(p->cpu_system , c->cpu_system , itv),
        percent_value(p->cpu_iowait , c->cpu_iowait , itv),
        percent_value(p->cpu_irq , c->cpu_irq , itv),
        percent_value(p->cpu_softirq, c->cpu_softirq, itv),
        percent_value(p->cpu_steal , c->cpu_steal , itv),
        percent_value(p->cpu_guest , c->cpu_guest , itv),
        percent_value(p->cpu_idle , c->cpu_idle , itv)
    );
}
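/* The column order above matches the header printed in write_stats_core():
 * %usr %nice %sys %iowait %irq %soft %steal %guest %idle.
 */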

static void write_stats_core(int prev, int current,
        const char *prev_str, const char *current_str)
{
    struct stats_cpu *scc, *scp;
    data_t itv, global_itv;
    int cpu;

    /* Compute time interval */
    itv = global_itv = jiffies_diff(G.global_uptime[prev], G.global_uptime[current]);

    /* Reduce interval to one CPU */
    if (G.cpu_nr > 1)
        itv = jiffies_diff(G.per_cpu_uptime[prev], G.per_cpu_uptime[current]);

    /* Print CPU stats */
    if (display_opt(D_CPU)) {

        ///* This is done exactly once */
        //if (!G.header_done) {
        printf("\n%-11s CPU %%usr %%nice %%sys %%iowait %%irq %%soft %%steal %%guest %%idle\n",
            prev_str
        );
        // G.header_done = 1;
        //}

        for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
            data_t per_cpu_itv;

            /* Print stats about this particular CPU? */
            if (!is_cpu_in_bitmap(cpu))
                continue;

            scc = &G.st_cpu[current][cpu];
            scp = &G.st_cpu[prev][cpu];
            per_cpu_itv = global_itv;

            printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
            if (cpu) {
                double idle;
                /*
                 * If the CPU is offline, then it isn't in /proc/stat,
                 * so all values are 0.
                 * NB: Guest time is already included in user time.
                 */
                if ((scc->cpu_user | scc->cpu_nice | scc->cpu_system |
                     scc->cpu_iowait | scc->cpu_idle | scc->cpu_steal |
                     scc->cpu_irq | scc->cpu_softirq) == 0
                ) {
                    /*
                     * Set current struct fields to values from the previous
                     * iteration. Then their values won't jump from
                     * zero when the CPU comes back online.
                     */
                    *scc = *scp;
                    idle = 0.0;
                    goto print_zeros;
                }
                /* Compute interval again for current proc */
                per_cpu_itv = get_per_cpu_interval(scc, scp);
                if (per_cpu_itv == 0) {
                    /*
                     * If the CPU is tickless then there is no change in CPU values
                     * but the sum of values is not zero.
                     */
                    idle = 100.0;
 print_zeros:
                    printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
                        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, idle);
                    continue;
                }
            }
            print_stats_cpu_struct(scp, scc, per_cpu_itv);
        }
    }

    /* Print total number of IRQs per CPU */
    if (display_opt(D_IRQ_SUM)) {

        ///* Print average header, this is done exactly once */
        //if (!G.avg_header_done) {
        printf("\n%-11s CPU intr/s\n", prev_str);
        // G.avg_header_done = 1;
        //}

        for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
            data_t per_cpu_itv;

            /* Print stats about this CPU? */
            if (!is_cpu_in_bitmap(cpu))
                continue;

            per_cpu_itv = itv;
            printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
            if (cpu) {
                scc = &G.st_cpu[current][cpu];
                scp = &G.st_cpu[prev][cpu];
                /* Compute interval again for current proc */
                per_cpu_itv = get_per_cpu_interval(scc, scp);
                if (per_cpu_itv == 0) {
                    printf(" %9.2f\n", 0.0);
                    continue;
                }
            }
            //bb_error_msg("G.st_irq[%u][%u].irq_nr:%lld - G.st_irq[%u][%u].irq_nr:%lld",
            // current, cpu, G.st_irq[prev][cpu].irq_nr, prev, cpu, G.st_irq[current][cpu].irq_nr);
            printf(" %9.2f\n", hz_value(G.st_irq[prev][cpu].irq_nr, G.st_irq[current][cpu].irq_nr, per_cpu_itv));
        }
    }

    if (display_opt(D_IRQ_CPU)) {
        write_irqcpu_stats(G.st_irqcpu, G.irqcpu_nr,
            itv,
            prev, current,
            prev_str, current_str
        );
    }

    if (display_opt(D_SOFTIRQS)) {
        write_irqcpu_stats(G.st_softirqcpu, G.softirqcpu_nr,
            itv,
            prev, current,
            prev_str, current_str
        );
    }
}

/*
 * Print the statistics
 */
static void write_stats(int current)
{
    char prev_time[16];
    char curr_time[16];

    strftime(prev_time, sizeof(prev_time), "%X", &G.timestamp[!current]);
    strftime(curr_time, sizeof(curr_time), "%X", &G.timestamp[current]);

    write_stats_core(!current, current, prev_time, curr_time);
}

static void write_stats_avg(int current)
{
    write_stats_core(2, current, "Average:", "Average:");
}

/*
 * Read CPU statistics
 */
static void get_cpu_statistics(struct stats_cpu *cpu, data_t *up, data_t *up0)
{
    FILE *fp;
    char buf[1024];

    fp = xfopen_for_read(PROCFS_STAT);

    while (fgets(buf, sizeof(buf), fp)) {
        data_t sum;
        unsigned cpu_number;
        struct stats_cpu *cp;

        if (!starts_with_cpu(buf))
            continue; /* not "cpu" */

        cp = cpu; /* for "cpu " case */
        if (buf[3] != ' ') {
            /* "cpuN " */
            if (G.cpu_nr == 0
             || sscanf(buf + 3, "%u ", &cpu_number) != 1
             || cpu_number >= G.cpu_nr
            ) {
                continue;
            }
            cp = &cpu[cpu_number + 1];
        }

        /* Read the counters, save them */
        /* Not all fields have to be present */
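        /* A typical /proc/stat line, with illustrative values, in the field
         * order assumed by the sscanf below:
         * "cpu0 4705 356 584 3699 23 23 0 0 0"
         *       user nice system idle iowait irq softirq steal guest
         */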
        memset(cp, 0, sizeof(*cp));
        sscanf(buf, "%*s"
            " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
            " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
            " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u",
            &cp->cpu_user, &cp->cpu_nice, &cp->cpu_system,
            &cp->cpu_idle, &cp->cpu_iowait, &cp->cpu_irq,
            &cp->cpu_softirq, &cp->cpu_steal, &cp->cpu_guest
        );
        /*
         * Compute uptime in jiffies (1/HZ), it'll be the sum of
         * individual CPUs' uptimes.
         * NB: We have to omit cpu_guest, because cpu_user includes it.
         */
        sum = cp->cpu_user + cp->cpu_nice + cp->cpu_system +
            cp->cpu_idle + cp->cpu_iowait + cp->cpu_irq +
            cp->cpu_softirq + cp->cpu_steal;

        if (buf[3] == ' ') {
            /* "cpu " */
            *up = sum;
        } else {
            /* "cpuN " */
            if (cpu_number == 0 && *up0 != 0) {
                /* Compute uptime of single CPU */
                *up0 = sum;
            }
        }
    }
    fclose(fp);
}

/*
 * Read IRQs from /proc/stat
 */
static void get_irqs_from_stat(struct stats_irq *irq)
{
    FILE *fp;
    char buf[1024];

    fp = xfopen_for_read(PROCFS_STAT);

    while (fgets(buf, sizeof(buf), fp)) {
        //bb_error_msg("/proc/stat:'%s'", buf);
        if (is_prefixed_with(buf, "intr ")) {
            /* Read total number of IRQs since system boot */
            sscanf(buf + 5, "%"FMT_DATA"u", &irq->irq_nr);
        }
    }

    fclose(fp);
}
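/* The "intr" line parsed above starts with the total number of interrupts
 * serviced since boot, followed by per-IRQ counts which are ignored here,
 * e.g. "intr 8984554 56 9 0 ..." -> irq->irq_nr = 8984554 (illustrative).
 */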

/*
 * Read stats from /proc/interrupts or /proc/softirqs
 */
static void get_irqs_from_interrupts(const char *fname,
        struct stats_irqcpu *per_cpu_stats[],
        int irqs_per_cpu, int current)
{
    FILE *fp;
    struct stats_irq *irq_i;
    struct stats_irqcpu *ic;
    char *buf;
    unsigned buflen;
    unsigned cpu;
    unsigned irq;
    int cpu_index[G.cpu_nr];
    int iindex;

// Moved to caller.
// Otherwise reading of /proc/softirqs
// was resetting counts to 0 after we painstakingly collected them from
// /proc/interrupts. Which resulted in:
// 01:32:47 PM CPU intr/s
// 01:32:47 PM all 591.47
// 01:32:47 PM 0 0.00 <= ???
// 01:32:47 PM 1 0.00 <= ???
// for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
// G.st_irq[current][cpu].irq_nr = 0;
// //bb_error_msg("G.st_irq[%u][%u].irq_nr=0", current, cpu);
// }

    fp = fopen_for_read(fname);
    if (!fp)
        return;

    buflen = INTERRUPTS_LINE + 16 * G.cpu_nr;
    buf = xmalloc(buflen);

    /* Parse header and determine which CPUs are online */
    iindex = 0;
    while (fgets(buf, buflen, fp)) {
        char *cp, *next;
        next = buf;
        while ((cp = strstr(next, "CPU")) != NULL
         && iindex < G.cpu_nr
        ) {
            cpu = strtoul(cp + 3, &next, 10);
            cpu_index[iindex++] = cpu;
        }
        if (iindex) /* We found header */
            break;
    }

    irq = 0;
    while (fgets(buf, buflen, fp)
     && irq < irqs_per_cpu
    ) {
        int len;
        char last_char;
        char *cp;

        /* Skip over "IRQNAME:" */
        cp = strchr(buf, ':');
        if (!cp)
            continue;
        last_char = cp[-1];

        ic = &per_cpu_stats[current][irq];
        len = cp - buf;
        if (len >= sizeof(ic->irq_name)) {
            len = sizeof(ic->irq_name) - 1;
        }
        safe_strncpy(ic->irq_name, buf, len + 1);
        //bb_error_msg("%s: irq%d:'%s' buf:'%s'", fname, irq, ic->irq_name, buf);
        cp++;

        for (cpu = 0; cpu < iindex; cpu++) {
            char *next;
            ic = &per_cpu_stats[current][cpu_index[cpu] * irqs_per_cpu + irq];
            irq_i = &G.st_irq[current][cpu_index[cpu] + 1];
            ic->interrupts = strtoul(cp, &next, 10);
            /* Count only numerical IRQs */
            if (isdigit(last_char)) {
                irq_i->irq_nr += ic->interrupts;
                //bb_error_msg("G.st_irq[%u][%u].irq_nr + %u = %lld",
                // current, cpu_index[cpu] + 1, ic->interrupts, irq_i->irq_nr);
            }
            cp = next;
        }
        irq++;
    }
    fclose(fp);
    free(buf);

    while (irq < irqs_per_cpu) {
        /* Number of interrupts per CPU has changed */
        ic = &per_cpu_stats[current][irq];
        ic->irq_name[0] = '\0'; /* False interrupt */
        irq++;
    }
}

static void get_uptime(data_t *uptime)
{
    FILE *fp;
    char buf[sizeof(long)*3 * 2 + 4]; /* enough for long.long */
    unsigned long uptime_sec, decimal;

    fp = xfopen_for_read(PROCFS_UPTIME);
    if (fgets(buf, sizeof(buf), fp)) {
        if (sscanf(buf, "%lu.%lu", &uptime_sec, &decimal) == 2) {
            *uptime = (data_t)uptime_sec * G.hz + decimal * G.hz / 100;
        }
    }

    fclose(fp);
}
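/* /proc/uptime holds "uptime idle" in seconds with two decimal places.
 * E.g. with "3600.50 ...": uptime_sec = 3600, decimal = 50, so with hz = 100
 * the result is 3600 * 100 + 50 * 100 / 100 = 360050 jiffies.
 */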

static void get_localtime(struct tm *tm)
{
    time_t timer;
    time(&timer);
    localtime_r(&timer, tm);
}

static void alarm_handler(int sig UNUSED_PARAM)
{
    signal(SIGALRM, alarm_handler);
    alarm(G.interval);
}
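/* The handler re-arms itself, so the pause() in main_loop() below returns
 * once every G.interval seconds.
 */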

static void main_loop(void)
{
    unsigned current;
    unsigned cpus;

    /* Read the stats */
    if (G.cpu_nr > 1) {
        G.per_cpu_uptime[0] = 0;
        get_uptime(&G.per_cpu_uptime[0]);
    }

    get_cpu_statistics(G.st_cpu[0], &G.global_uptime[0], &G.per_cpu_uptime[0]);

    if (display_opt(D_IRQ_SUM))
        get_irqs_from_stat(G.st_irq[0]);

    if (display_opt(D_IRQ_SUM | D_IRQ_CPU))
        get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
                G.irqcpu_nr, 0);

    if (display_opt(D_SOFTIRQS))
        get_irqs_from_interrupts(PROCFS_SOFTIRQS, G.st_softirqcpu,
                G.softirqcpu_nr, 0);

    if (G.interval == 0) {
        /* Display since boot time */
        cpus = G.cpu_nr + 1;
        G.timestamp[1] = G.timestamp[0];
        memset(G.st_cpu[1], 0, sizeof(G.st_cpu[1][0]) * cpus);
        memset(G.st_irq[1], 0, sizeof(G.st_irq[1][0]) * cpus);
        memset(G.st_irqcpu[1], 0, sizeof(G.st_irqcpu[1][0]) * cpus * G.irqcpu_nr);
        memset(G.st_softirqcpu[1], 0, sizeof(G.st_softirqcpu[1][0]) * cpus * G.softirqcpu_nr);

        write_stats(0);

        /* And we're done */
        return;
    }

    /* Set a handler for SIGALRM */
    alarm_handler(0);

    /* Save the stats we already have. We need them to compute the average */
    G.timestamp[2] = G.timestamp[0];
    G.global_uptime[2] = G.global_uptime[0];
    G.per_cpu_uptime[2] = G.per_cpu_uptime[0];
    cpus = G.cpu_nr + 1;
    memcpy(G.st_cpu[2], G.st_cpu[0], sizeof(G.st_cpu[0][0]) * cpus);
    memcpy(G.st_irq[2], G.st_irq[0], sizeof(G.st_irq[0][0]) * cpus);
    memcpy(G.st_irqcpu[2], G.st_irqcpu[0], sizeof(G.st_irqcpu[0][0]) * cpus * G.irqcpu_nr);
    if (display_opt(D_SOFTIRQS)) {
        memcpy(G.st_softirqcpu[2], G.st_softirqcpu[0],
            sizeof(G.st_softirqcpu[0][0]) * cpus * G.softirqcpu_nr);
    }

    current = 1;
    while (1) {
        /* Suspend until a signal is received */
        pause();

        /* Set structures to 0 to distinguish off/online CPUs */
        memset(&G.st_cpu[current][/*cpu:*/ 1], 0, sizeof(G.st_cpu[0][0]) * G.cpu_nr);

        get_localtime(&G.timestamp[current]);

        /* Read stats */
        if (G.cpu_nr > 1) {
            G.per_cpu_uptime[current] = 0;
            get_uptime(&G.per_cpu_uptime[current]);
        }
        get_cpu_statistics(G.st_cpu[current], &G.global_uptime[current], &G.per_cpu_uptime[current]);

        if (display_opt(D_IRQ_SUM))
            get_irqs_from_stat(G.st_irq[current]);

        if (display_opt(D_IRQ_SUM | D_IRQ_CPU)) {
            int cpu;
            for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
                G.st_irq[current][cpu].irq_nr = 0;
            }
            /* accumulates .irq_nr */
            get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
                    G.irqcpu_nr, current);
        }

        if (display_opt(D_SOFTIRQS))
            get_irqs_from_interrupts(PROCFS_SOFTIRQS,
                    G.st_softirqcpu,
                    G.softirqcpu_nr, current);

        write_stats(current);

        if (G.count > 0) {
            if (--G.count == 0)
                break;
        }

        current ^= 1;
    }

    /* Print average statistics */
    write_stats_avg(current);
}

/* Initialization */

static void alloc_struct(int cpus)
{
    int i;
    for (i = 0; i < 3; i++) {
        G.st_cpu[i] = xzalloc(sizeof(G.st_cpu[i][0]) * cpus);
        G.st_irq[i] = xzalloc(sizeof(G.st_irq[i][0]) * cpus);
        G.st_irqcpu[i] = xzalloc(sizeof(G.st_irqcpu[i][0]) * cpus * G.irqcpu_nr);
        G.st_softirqcpu[i] = xzalloc(sizeof(G.st_softirqcpu[i][0]) * cpus * G.softirqcpu_nr);
    }
    G.cpu_bitmap_len = (cpus >> 3) + 1;
    G.cpu_bitmap = xzalloc(G.cpu_bitmap_len);
}

static void print_header(struct tm *t)
{
    char cur_date[16];
    struct utsname uts;

    /* Get system name, release number and hostname */
    uname(&uts);

    strftime(cur_date, sizeof(cur_date), "%x", t);

    printf("%s %s (%s)\t%s\t_%s_\t(%u CPU)\n",
        uts.sysname, uts.release, uts.nodename, cur_date, uts.machine, G.cpu_nr);
}

/*
 * Get number of interrupts available per processor
 */
static int get_irqcpu_nr(const char *f, int max_irqs)
{
    FILE *fp;
    char *line;
    unsigned linelen;
    unsigned irq;

    fp = fopen_for_read(f);
    if (!fp) /* No interrupts file */
        return 0;

    linelen = INTERRUPTS_LINE + 16 * G.cpu_nr;
    line = xmalloc(linelen);

    irq = 0;
    while (fgets(line, linelen, fp)
     && irq < max_irqs
    ) {
        int p = strcspn(line, ":");
        if ((p > 0) && (p < 16))
            irq++;
    }

    fclose(fp);
    free(line);

    return irq;
}
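/* Counts only lines whose "IRQNAME:" prefix is shorter than MAX_IRQNAME_LEN;
 * the caller adds NR_IRQCPU_PREALLOC spare slots for interrupts that may be
 * registered after this count is taken.
 */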

//usage:#define mpstat_trivial_usage
//usage: "[-A] [-I SUM|CPU|ALL|SCPU] [-u] [-P num|ALL] [INTERVAL [COUNT]]"
//usage:#define mpstat_full_usage "\n\n"
//usage: "Per-processor statistics\n"
//usage: "\n -A Same as -I ALL -u -P ALL"
//usage: "\n -I SUM|CPU|ALL|SCPU Report interrupt statistics"
//usage: "\n -P num|ALL Processor to monitor"
//usage: "\n -u Report CPU utilization"

int mpstat_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mpstat_main(int UNUSED_PARAM argc, char **argv)
{
    char *opt_irq_fmt;
    char *opt_set_cpu;
    int i, opt;
    enum {
        OPT_ALL = 1 << 0, /* -A */
        OPT_INTS = 1 << 1, /* -I */
        OPT_SETCPU = 1 << 2, /* -P */
        OPT_UTIL = 1 << 3, /* -u */
    };

    /* Don't buffer data if redirected to a pipe */
    setbuf(stdout, NULL);

    INIT_G();

    G.interval = -1;

    /* Get number of processors */
    G.cpu_nr = get_cpu_count();

    /* Get number of clock ticks per sec */
    G.hz = bb_clk_tck();

    /* Calculate number of interrupts per processor */
    G.irqcpu_nr = get_irqcpu_nr(PROCFS_INTERRUPTS, NR_IRQS) + NR_IRQCPU_PREALLOC;

    /* Calculate number of soft interrupts per processor */
    G.softirqcpu_nr = get_irqcpu_nr(PROCFS_SOFTIRQS, NR_IRQS) + NR_IRQCPU_PREALLOC;

    /* Allocate space for structures. + 1 for global structure. */
    alloc_struct(G.cpu_nr + 1);

    /* Parse and process arguments */
    opt = getopt32(argv, "AI:P:u", &opt_irq_fmt, &opt_set_cpu);
    argv += optind;

    if (*argv) {
        /* Get interval */
        G.interval = xatoi_positive(*argv);
        G.count = -1;
        argv++;
        if (*argv) {
            /* Get count value */
            if (G.interval == 0)
                bb_show_usage();
            G.count = xatoi_positive(*argv);
            //if (*++argv)
            // bb_show_usage();
        }
    }
    if (G.interval < 0)
        G.interval = 0;

    if (opt & OPT_ALL) {
        G.p_option = 1;
        G.options |= D_CPU + D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS;
        /* Select every CPU */
        memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
    }

    if (opt & OPT_INTS) {
        static const char v[] = {
            D_IRQ_CPU, D_IRQ_SUM, D_SOFTIRQS,
            D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS
        };
        i = index_in_strings("CPU\0SUM\0SCPU\0ALL\0", opt_irq_fmt);
        if (i == -1)
            bb_show_usage();
        G.options |= v[i];
    }

    if ((opt & OPT_UTIL) /* -u? */
     || G.options == 0 /* nothing? (use default then) */
    ) {
        G.options |= D_CPU;
    }

    if (opt & OPT_SETCPU) {
        char *t;
        G.p_option = 1;

        for (t = strtok(opt_set_cpu, ","); t; t = strtok(NULL, ",")) {
            if (strcmp(t, "ALL") == 0) {
                /* Select every CPU */
                memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
            } else {
                /* Get CPU number */
                unsigned n = xatoi_positive(t);
                if (n >= G.cpu_nr)
                    bb_error_msg_and_die("not that many processors");
                n++;
                G.cpu_bitmap[n >> 3] |= 1 << (n & 7);
            }
        }
    }

    if (!G.p_option)
        /* Display global stats */
        G.cpu_bitmap[0] = 1;

    /* Get time */
    get_localtime(&G.timestamp[0]);

    /* Display header */
    print_header(&G.timestamp[0]);

    /* The main loop */
    main_loop();

    if (ENABLE_FEATURE_CLEAN_UP) {
        /* Clean up */
        for (i = 0; i < 3; i++) {
            free(G.st_cpu[i]);
            free(G.st_irq[i]);
            free(G.st_irqcpu[i]);
            free(G.st_softirqcpu[i]);
        }
        free(G.cpu_bitmap);
        free(&G);
    }

    return EXIT_SUCCESS;
}