source: MondoRescue/branches/3.2/mindi-busybox/procps/mpstat.c@ 3232

Last change on this file since 3232 was 3232, checked in by Bruno Cornec, 10 years ago
  • Update mindi-busybox to 1.21.1
  • Property svn:eol-style set to native
File size: 23.8 KB
/* vi: set sw=4 ts=4: */
/*
 * Per-processor statistics, based on sysstat version 9.1.2 by Sebastien Godard
 *
 * Copyright (C) 2010 Marek Polacek <mmpolacek@gmail.com>
 *
 * Licensed under GPLv2, see file LICENSE in this source tree.
 */

//applet:IF_MPSTAT(APPLET(mpstat, BB_DIR_BIN, BB_SUID_DROP))

//kbuild:lib-$(CONFIG_MPSTAT) += mpstat.o

//config:config MPSTAT
//config: bool "mpstat"
//config: default y
//config: help
//config: Per-processor statistics

#include "libbb.h"
#include <sys/utsname.h> /* struct utsname */

//#define debug(fmt, ...) fprintf(stderr, fmt, ## __VA_ARGS__)
#define debug(fmt, ...) ((void)0)

/* Size of /proc/interrupts line, CPU data excluded */
#define INTERRUPTS_LINE 64
/* Maximum number of interrupts */
#define NR_IRQS 256
#define NR_IRQCPU_PREALLOC 3
#define MAX_IRQNAME_LEN 16
#define MAX_PF_NAME 512
/* sysstat 9.0.6 uses width 8, but newer code which also prints /proc/softirqs
 * data needs more: "interrupts" in /proc/softirqs have longer names,
 * most are up to 8 chars, one (BLOCK_IOPOLL) is even longer.
 * We are printing headers in the " IRQNAME/s" form; experimentally,
 * anything smaller than 10 chars looks ugly for /proc/softirqs stats.
 */
#define INTRATE_SCRWIDTH 10
#define INTRATE_SCRWIDTH_STR "10"

/* System files */
#define PROCFS_STAT "/proc/stat"
#define PROCFS_INTERRUPTS "/proc/interrupts"
#define PROCFS_SOFTIRQS "/proc/softirqs"
#define PROCFS_UPTIME "/proc/uptime"


#if 1
typedef unsigned long long data_t;
typedef long long idata_t;
#define FMT_DATA "ll"
#define DATA_MAX ULLONG_MAX
#else
typedef unsigned long data_t;
typedef long idata_t;
#define FMT_DATA "l"
#define DATA_MAX ULONG_MAX
#endif


struct stats_irqcpu {
    unsigned interrupts;
    char irq_name[MAX_IRQNAME_LEN];
};

struct stats_cpu {
    data_t cpu_user;
    data_t cpu_nice;
    data_t cpu_system;
    data_t cpu_idle;
    data_t cpu_iowait;
    data_t cpu_steal;
    data_t cpu_irq;
    data_t cpu_softirq;
    data_t cpu_guest;
};

struct stats_irq {
    data_t irq_nr;
};


/* Globals. Sort by size and access frequency. */
struct globals {
    int interval;
    int count;
    unsigned cpu_nr; /* Number of CPUs */
    unsigned irqcpu_nr; /* Number of interrupts per CPU */
    unsigned softirqcpu_nr; /* Number of soft interrupts per CPU */
    unsigned options;
    unsigned hz;
    unsigned cpu_bitmap_len;
    smallint p_option;
    // 9.0.6 does not do it. Try "mpstat -A 1 2" - headers are repeated!
    //smallint header_done;
    //smallint avg_header_done;
    unsigned char *cpu_bitmap; /* Bit 0: global, bit 1: 1st proc... */
    data_t global_uptime[3];
    data_t per_cpu_uptime[3];
    struct stats_cpu *st_cpu[3];
    struct stats_irq *st_irq[3];
    struct stats_irqcpu *st_irqcpu[3];
    struct stats_irqcpu *st_softirqcpu[3];
    struct tm timestamp[3];
};
#define G (*ptr_to_globals)
#define INIT_G() do { \
    SET_PTR_TO_GLOBALS(xzalloc(sizeof(G))); \
} while (0)

/* The selected interrupts statistics (bits in G.options) */
enum {
    D_CPU = 1 << 0,
    D_IRQ_SUM = 1 << 1,
    D_IRQ_CPU = 1 << 2,
    D_SOFTIRQS = 1 << 3,
};


/* Is option on? */
static ALWAYS_INLINE int display_opt(int opt)
{
    return (opt & G.options);
}

#if DATA_MAX > 0xffffffff
/*
 * Handle overflow conditions properly for counters which can have
 * fewer bits than data_t, depending on the kernel version.
 */
/* Surprisingly, on 32bit inlining is a size win */
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
    data_t v = curr - prev;

    if ((idata_t)v < 0 /* curr < prev - counter overflow? */
     && prev <= 0xffffffff /* kernel uses 32bit value for the counter? */
    ) {
        /* Add a 33rd bit set to 1 to curr, compensating for the overflow */
        /* double shift defeats "warning: left shift count >= width of type" */
        v += ((data_t)1 << 16) << 16;
    }
    return v;
}
#else
static ALWAYS_INLINE data_t overflow_safe_sub(data_t prev, data_t curr)
{
    return curr - prev;
}
#endif
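
/* Worked example of the wraparound compensation above (numbers invented):
 * a 32-bit kernel counter wraps between samples, prev = 0xfffffff0 and
 * curr = 0x00000010.  Then v = curr - prev is hugely positive as unsigned
 * data_t but negative as idata_t, and prev fits in 32 bits, so 2^32 is
 * added: v becomes 0x10 - 0xfffffff0 + 0x100000000 = 0x20, i.e. 32 ticks.
 */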

static double percent_value(data_t prev, data_t curr, data_t itv)
{
    return ((double)overflow_safe_sub(prev, curr)) / itv * 100;
}

static double hz_value(data_t prev, data_t curr, data_t itv)
{
    //bb_error_msg("curr:%lld prev:%lld G.hz:%u", curr, prev, G.hz);
    return ((double)overflow_safe_sub(prev, curr)) / itv * G.hz;
}
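
/* Worked example with assumed sample values: a CPU that accumulated
 * curr - prev = 150 user jiffies over an interval of itv = 500 jiffies
 * gets percent_value() = 150 / 500 * 100 = 30.00 (%usr).  With G.hz = 100,
 * hz_value() converts counts per interval into events per second:
 * 250 interrupts over 500 jiffies -> 250 / 500 * 100 = 50.00 intr/s.
 */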

static ALWAYS_INLINE data_t jiffies_diff(data_t old, data_t new)
{
    data_t diff = new - old;
    return (diff == 0) ? 1 : diff;
}

static int is_cpu_in_bitmap(unsigned cpu)
{
    return G.cpu_bitmap[cpu >> 3] & (1 << (cpu & 7));
}
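
/* Bitmap layout reminder (values purely illustrative): bit 0 of
 * cpu_bitmap[0] stands for the "all" line and bit N+1 for CPU N, matching
 * the n++ in the -P parser in mpstat_main().  E.g. "-P 0,2" sets bits 1
 * and 3, so cpu_bitmap[0] == 0x0a and only is_cpu_in_bitmap(1) and
 * is_cpu_in_bitmap(3) return nonzero.
 */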

static void write_irqcpu_stats(struct stats_irqcpu *per_cpu_stats[],
        int total_irqs,
        data_t itv,
        int prev, int current,
        const char *prev_str, const char *current_str)
{
    int j;
    int offset, cpu;
    struct stats_irqcpu *p0, *q0;

    /* Check if number of IRQs has changed */
    if (G.interval != 0) {
        for (j = 0; j <= total_irqs; j++) {
            p0 = &per_cpu_stats[current][j];
            if (p0->irq_name[0] != '\0') {
                q0 = &per_cpu_stats[prev][j];
                if (strcmp(p0->irq_name, q0->irq_name) != 0) {
                    /* Strings are different */
                    break;
                }
            }
        }
    }

    /* Print header */
    printf("\n%-11s CPU", prev_str);
    {
        /* A bit complex code to "buy back" space if one header is too wide.
         * Here's how it looks. BLOCK_IOPOLL eats too much space,
         * and later headers use a smaller width to compensate:
         * ...BLOCK/s BLOCK_IOPOLL/s TASKLET/s SCHED/s HRTIMER/s RCU/s
         * ... 2.32 0.00 0.01 17.58 0.14 141.96
         */
        int expected_len = 0;
        int printed_len = 0;
        for (j = 0; j < total_irqs; j++) {
            p0 = &per_cpu_stats[current][j];
            if (p0->irq_name[0] != '\0') {
                int n = (INTRATE_SCRWIDTH-3) - (printed_len - expected_len);
                printed_len += printf(" %*s/s", n > 0 ? n : 0, skip_whitespace(p0->irq_name));
                expected_len += INTRATE_SCRWIDTH;
            }
        }
    }
    bb_putchar('\n');

    for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
        /* Check if we want stats about this CPU */
        if (!is_cpu_in_bitmap(cpu) && G.p_option) {
            continue;
        }

        printf("%-11s %4u", current_str, cpu - 1);

        for (j = 0; j < total_irqs; j++) {
            /* IRQ field set only for proc 0 */
            p0 = &per_cpu_stats[current][j];

            /*
             * An empty string for the irq name means that
             * the interrupt is no longer used.
             */
            if (p0->irq_name[0] != '\0') {
                offset = j;
                q0 = &per_cpu_stats[prev][offset];

                /*
                 * If we want stats for the time since boot,
                 * we have p0->irq_name != q0->irq_name
                 * (the prev slot was zeroed).
                 */
                if (strcmp(p0->irq_name, q0->irq_name) != 0
                 && G.interval != 0
                ) {
                    if (j) {
                        offset = j - 1;
                        q0 = &per_cpu_stats[prev][offset];
                    }
                    if (strcmp(p0->irq_name, q0->irq_name) != 0
                     && (j + 1 < total_irqs)
                    ) {
                        offset = j + 1;
                        q0 = &per_cpu_stats[prev][offset];
                    }
                }

                if (strcmp(p0->irq_name, q0->irq_name) == 0
                 || G.interval == 0
                ) {
                    struct stats_irqcpu *p, *q;
                    p = &per_cpu_stats[current][(cpu - 1) * total_irqs + j];
                    q = &per_cpu_stats[prev][(cpu - 1) * total_irqs + offset];
                    printf("%"INTRATE_SCRWIDTH_STR".2f",
                        (double)(p->interrupts - q->interrupts) / itv * G.hz);
                } else {
                    printf(" N/A");
                }
            }
        }
        bb_putchar('\n');
    }
}

static data_t get_per_cpu_interval(const struct stats_cpu *scc,
        const struct stats_cpu *scp)
{
    return ((scc->cpu_user + scc->cpu_nice +
            scc->cpu_system + scc->cpu_iowait +
            scc->cpu_idle + scc->cpu_steal +
            scc->cpu_irq + scc->cpu_softirq) -
            (scp->cpu_user + scp->cpu_nice +
            scp->cpu_system + scp->cpu_iowait +
            scp->cpu_idle + scp->cpu_steal +
            scp->cpu_irq + scp->cpu_softirq));
}

static void print_stats_cpu_struct(const struct stats_cpu *p,
        const struct stats_cpu *c,
        data_t itv)
{
    printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
        percent_value(p->cpu_user - p->cpu_guest,
        /**/ c->cpu_user - c->cpu_guest, itv),
        percent_value(p->cpu_nice , c->cpu_nice , itv),
        percent_value(p->cpu_system , c->cpu_system , itv),
        percent_value(p->cpu_iowait , c->cpu_iowait , itv),
        percent_value(p->cpu_irq , c->cpu_irq , itv),
        percent_value(p->cpu_softirq, c->cpu_softirq, itv),
        percent_value(p->cpu_steal , c->cpu_steal , itv),
        percent_value(p->cpu_guest , c->cpu_guest , itv),
        percent_value(p->cpu_idle , c->cpu_idle , itv)
    );
}

static void write_stats_core(int prev, int current,
        const char *prev_str, const char *current_str)
{
    struct stats_cpu *scc, *scp;
    data_t itv, global_itv;
    int cpu;

    /* Compute time interval */
    itv = global_itv = jiffies_diff(G.global_uptime[prev], G.global_uptime[current]);

    /* Reduce interval to one CPU */
    if (G.cpu_nr > 1)
        itv = jiffies_diff(G.per_cpu_uptime[prev], G.per_cpu_uptime[current]);

    /* Print CPU stats */
    if (display_opt(D_CPU)) {

        ///* This is done exactly once */
        //if (!G.header_done) {
        printf("\n%-11s CPU %%usr %%nice %%sys %%iowait %%irq %%soft %%steal %%guest %%idle\n",
            prev_str
        );
        // G.header_done = 1;
        //}

        for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
            data_t per_cpu_itv;

            /* Print stats about this particular CPU? */
            if (!is_cpu_in_bitmap(cpu))
                continue;

            scc = &G.st_cpu[current][cpu];
            scp = &G.st_cpu[prev][cpu];
            per_cpu_itv = global_itv;

            printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
            if (cpu) {
                double idle;
                /*
                 * If the CPU is offline, then it isn't in /proc/stat,
                 * so all values are 0.
                 * NB: Guest time is already included in user time.
                 */
                if ((scc->cpu_user | scc->cpu_nice | scc->cpu_system |
                     scc->cpu_iowait | scc->cpu_idle | scc->cpu_steal |
                     scc->cpu_irq | scc->cpu_softirq) == 0
                ) {
                    /*
                     * Set current struct fields to values from the
                     * previous iteration, so their values won't jump
                     * from zero when the CPU comes back online.
                     */
                    *scc = *scp;
                    idle = 0.0;
                    goto print_zeros;
                }
                /* Compute interval again for current proc */
                per_cpu_itv = get_per_cpu_interval(scc, scp);
                if (per_cpu_itv == 0) {
                    /*
                     * If the CPU is tickless, then there is no change
                     * in CPU values, but the sum of values is not zero.
                     */
                    idle = 100.0;
 print_zeros:
                    printf(" %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f %7.2f\n",
                        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, idle);
                    continue;
                }
            }
            print_stats_cpu_struct(scp, scc, per_cpu_itv);
        }
    }

    /* Print total number of IRQs per CPU */
    if (display_opt(D_IRQ_SUM)) {

        ///* Print average header, this is done exactly once */
        //if (!G.avg_header_done) {
        printf("\n%-11s CPU intr/s\n", prev_str);
        // G.avg_header_done = 1;
        //}

        for (cpu = 0; cpu <= G.cpu_nr; cpu++) {
            data_t per_cpu_itv;

            /* Print stats about this CPU? */
            if (!is_cpu_in_bitmap(cpu))
                continue;

            per_cpu_itv = itv;
            printf((cpu ? "%-11s %4u" : "%-11s all"), current_str, cpu - 1);
            if (cpu) {
                scc = &G.st_cpu[current][cpu];
                scp = &G.st_cpu[prev][cpu];
                /* Compute interval again for current proc */
                per_cpu_itv = get_per_cpu_interval(scc, scp);
                if (per_cpu_itv == 0) {
                    printf(" %9.2f\n", 0.0);
                    continue;
                }
            }
            //bb_error_msg("G.st_irq[%u][%u].irq_nr:%lld - G.st_irq[%u][%u].irq_nr:%lld",
            // current, cpu, G.st_irq[prev][cpu].irq_nr, prev, cpu, G.st_irq[current][cpu].irq_nr);
            printf(" %9.2f\n", hz_value(G.st_irq[prev][cpu].irq_nr, G.st_irq[current][cpu].irq_nr, per_cpu_itv));
        }
    }

    if (display_opt(D_IRQ_CPU)) {
        write_irqcpu_stats(G.st_irqcpu, G.irqcpu_nr,
                itv,
                prev, current,
                prev_str, current_str
        );
    }

    if (display_opt(D_SOFTIRQS)) {
        write_irqcpu_stats(G.st_softirqcpu, G.softirqcpu_nr,
                itv,
                prev, current,
                prev_str, current_str
        );
    }
}

/*
 * Print the statistics
 */
static void write_stats(int current)
{
    char prev_time[16];
    char curr_time[16];

    strftime(prev_time, sizeof(prev_time), "%X", &G.timestamp[!current]);
    strftime(curr_time, sizeof(curr_time), "%X", &G.timestamp[current]);

    write_stats_core(!current, current, prev_time, curr_time);
}

static void write_stats_avg(int current)
{
    write_stats_core(2, current, "Average:", "Average:");
}

/*
 * Read CPU statistics
 */
static void get_cpu_statistics(struct stats_cpu *cpu, data_t *up, data_t *up0)
{
    FILE *fp;
    char buf[1024];

    fp = xfopen_for_read(PROCFS_STAT);

    while (fgets(buf, sizeof(buf), fp)) {
        data_t sum;
        unsigned cpu_number;
        struct stats_cpu *cp;

        if (!starts_with_cpu(buf))
            continue; /* not "cpu" */

        cp = cpu; /* for "cpu " case */
        if (buf[3] != ' ') {
            /* "cpuN " */
            if (G.cpu_nr == 0
             || sscanf(buf + 3, "%u ", &cpu_number) != 1
             || cpu_number >= G.cpu_nr
            ) {
                continue;
            }
            cp = &cpu[cpu_number + 1];
        }

        /* Read the counters, save them */
        /* Not all fields have to be present */
        memset(cp, 0, sizeof(*cp));
        sscanf(buf, "%*s"
            " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
            " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u"
            " %"FMT_DATA"u %"FMT_DATA"u %"FMT_DATA"u",
            &cp->cpu_user, &cp->cpu_nice, &cp->cpu_system,
            &cp->cpu_idle, &cp->cpu_iowait, &cp->cpu_irq,
            &cp->cpu_softirq, &cp->cpu_steal, &cp->cpu_guest
        );
        /*
         * Compute uptime in jiffies (1/HZ); it is the sum of the
         * individual CPUs' uptimes.
         * NB: We have to omit cpu_guest, because cpu_user includes it.
         */
        sum = cp->cpu_user + cp->cpu_nice + cp->cpu_system +
            cp->cpu_idle + cp->cpu_iowait + cp->cpu_irq +
            cp->cpu_softirq + cp->cpu_steal;

        if (buf[3] == ' ') {
            /* "cpu " */
            *up = sum;
        } else {
            /* "cpuN " */
            if (cpu_number == 0 && *up0 != 0) {
                /* Compute uptime of single CPU */
                *up0 = sum;
            }
        }
    }
    fclose(fp);
}
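
/* For reference, a /proc/stat line consumed above looks like this
 * (numbers invented for illustration):
 *   cpu0 4705 150 1120 1048576 1290 0 45 0 0
 * i.e. user nice system idle iowait irq softirq steal guest, in jiffies;
 * the trailing guest_nice column of newer kernels is simply ignored by
 * the 9-field sscanf().
 */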

/*
 * Read IRQs from /proc/stat
 */
static void get_irqs_from_stat(struct stats_irq *irq)
{
    FILE *fp;
    char buf[1024];

    fp = fopen_for_read(PROCFS_STAT);
    if (!fp)
        return;

    while (fgets(buf, sizeof(buf), fp)) {
        //bb_error_msg("/proc/stat:'%s'", buf);
        if (strncmp(buf, "intr ", 5) == 0) {
            /* Read total number of IRQs since system boot */
            sscanf(buf + 5, "%"FMT_DATA"u", &irq->irq_nr);
        }
    }

    fclose(fp);
}
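
/* The "intr" line starts with the grand total followed by one counter per
 * interrupt source, e.g. (illustrative) "intr 8904321 132 0 0 9087 ...";
 * only the first number after "intr " is read here.
 */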

/*
 * Read stats from /proc/interrupts or /proc/softirqs
 */
static void get_irqs_from_interrupts(const char *fname,
        struct stats_irqcpu *per_cpu_stats[],
        int irqs_per_cpu, int current)
{
    FILE *fp;
    struct stats_irq *irq_i;
    struct stats_irqcpu *ic;
    char *buf;
    unsigned buflen;
    unsigned cpu;
    unsigned irq;
    int cpu_index[G.cpu_nr];
    int iindex;

// Moved to caller.
// Otherwise reading of /proc/softirqs
// was resetting counts to 0 after we painstakingly collected them from
// /proc/interrupts. Which resulted in:
// 01:32:47 PM CPU intr/s
// 01:32:47 PM all 591.47
// 01:32:47 PM 0 0.00 <= ???
// 01:32:47 PM 1 0.00 <= ???
// for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
// G.st_irq[current][cpu].irq_nr = 0;
// //bb_error_msg("G.st_irq[%u][%u].irq_nr=0", current, cpu);
// }

    fp = fopen_for_read(fname);
    if (!fp)
        return;

    buflen = INTERRUPTS_LINE + 16 * G.cpu_nr;
    buf = xmalloc(buflen);

    /* Parse the header and determine which CPUs are online */
    iindex = 0;
    while (fgets(buf, buflen, fp)) {
        char *cp, *next;
        next = buf;
        while ((cp = strstr(next, "CPU")) != NULL
         && iindex < G.cpu_nr
        ) {
            cpu = strtoul(cp + 3, &next, 10);
            cpu_index[iindex++] = cpu;
        }
        if (iindex) /* We found the header */
            break;
    }

    irq = 0;
    while (fgets(buf, buflen, fp)
     && irq < irqs_per_cpu
    ) {
        int len;
        char last_char;
        char *cp;

        /* Skip over "IRQNAME:" */
        cp = strchr(buf, ':');
        if (!cp)
            continue;
        last_char = cp[-1];

        ic = &per_cpu_stats[current][irq];
        len = cp - buf;
        if (len >= sizeof(ic->irq_name)) {
            len = sizeof(ic->irq_name) - 1;
        }
        safe_strncpy(ic->irq_name, buf, len + 1);
        //bb_error_msg("%s: irq%d:'%s' buf:'%s'", fname, irq, ic->irq_name, buf);
        cp++;

        for (cpu = 0; cpu < iindex; cpu++) {
            char *next;
            ic = &per_cpu_stats[current][cpu_index[cpu] * irqs_per_cpu + irq];
            irq_i = &G.st_irq[current][cpu_index[cpu] + 1];
            ic->interrupts = strtoul(cp, &next, 10);
            /* Count only numerical IRQs */
            if (isdigit(last_char)) {
                irq_i->irq_nr += ic->interrupts;
                //bb_error_msg("G.st_irq[%u][%u].irq_nr + %u = %lld",
                // current, cpu_index[cpu] + 1, ic->interrupts, irq_i->irq_nr);
            }
            cp = next;
        }
        irq++;
    }
    fclose(fp);
    free(buf);

    while (irq < irqs_per_cpu) {
        /* Number of interrupts per CPU has changed */
        ic = &per_cpu_stats[current][irq];
        ic->irq_name[0] = '\0'; /* False interrupt */
        irq++;
    }
}
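
/* Sketch of the expected input (names and numbers illustrative):
 *            CPU0       CPU1
 *   0:      12345       6789   IO-APIC-edge      timer
 *  NMI:         20         18   Non-maskable interrupts
 * The "CPUn" tokens of the header fill cpu_index[], each following row
 * stores CPU n's counter at per_cpu_stats[current][n * irqs_per_cpu + irq],
 * and only rows whose name ends in a digit ("0:", "19:", ...) are added to
 * the per-CPU totals in G.st_irq[].
 */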

static void get_uptime(data_t *uptime)
{
    FILE *fp;
    char buf[sizeof(long)*3 * 2 + 4]; /* enough for long.long */
    unsigned long uptime_sec, decimal;

    fp = fopen_for_read(PROCFS_UPTIME);
    if (!fp)
        return;
    if (fgets(buf, sizeof(buf), fp)) {
        if (sscanf(buf, "%lu.%lu", &uptime_sec, &decimal) == 2) {
            *uptime = (data_t)uptime_sec * G.hz + decimal * G.hz / 100;
        }
    }

    fclose(fp);
}
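
/* Worked example, assuming G.hz == 100: a /proc/uptime reading of
 * "12345.67 ..." yields *uptime = 12345 * 100 + 67 * 100 / 100
 * = 1234567 jiffies.
 */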

static void get_localtime(struct tm *tm)
{
    time_t timer;
    time(&timer);
    localtime_r(&timer, tm);
}

static void alarm_handler(int sig UNUSED_PARAM)
{
    signal(SIGALRM, alarm_handler);
    alarm(G.interval);
}

static void main_loop(void)
{
    unsigned current;
    unsigned cpus;

    /* Read the stats */
    if (G.cpu_nr > 1) {
        G.per_cpu_uptime[0] = 0;
        get_uptime(&G.per_cpu_uptime[0]);
    }

    get_cpu_statistics(G.st_cpu[0], &G.global_uptime[0], &G.per_cpu_uptime[0]);

    if (display_opt(D_IRQ_SUM))
        get_irqs_from_stat(G.st_irq[0]);

    if (display_opt(D_IRQ_SUM | D_IRQ_CPU))
        get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
                G.irqcpu_nr, 0);

    if (display_opt(D_SOFTIRQS))
        get_irqs_from_interrupts(PROCFS_SOFTIRQS, G.st_softirqcpu,
                G.softirqcpu_nr, 0);

    if (G.interval == 0) {
        /* Display since boot time */
        cpus = G.cpu_nr + 1;
        G.timestamp[1] = G.timestamp[0];
        memset(G.st_cpu[1], 0, sizeof(G.st_cpu[1][0]) * cpus);
        memset(G.st_irq[1], 0, sizeof(G.st_irq[1][0]) * cpus);
        memset(G.st_irqcpu[1], 0, sizeof(G.st_irqcpu[1][0]) * cpus * G.irqcpu_nr);
        memset(G.st_softirqcpu[1], 0, sizeof(G.st_softirqcpu[1][0]) * cpus * G.softirqcpu_nr);

        write_stats(0);

        /* And we're done */
        return;
    }

    /* Set a handler for SIGALRM */
    alarm_handler(0);

    /* Save the stats we already have. We need them to compute the average */
    G.timestamp[2] = G.timestamp[0];
    G.global_uptime[2] = G.global_uptime[0];
    G.per_cpu_uptime[2] = G.per_cpu_uptime[0];
    cpus = G.cpu_nr + 1;
    memcpy(G.st_cpu[2], G.st_cpu[0], sizeof(G.st_cpu[0][0]) * cpus);
    memcpy(G.st_irq[2], G.st_irq[0], sizeof(G.st_irq[0][0]) * cpus);
    memcpy(G.st_irqcpu[2], G.st_irqcpu[0], sizeof(G.st_irqcpu[0][0]) * cpus * G.irqcpu_nr);
    if (display_opt(D_SOFTIRQS)) {
        memcpy(G.st_softirqcpu[2], G.st_softirqcpu[0],
            sizeof(G.st_softirqcpu[0][0]) * cpus * G.softirqcpu_nr);
    }

    current = 1;
    while (1) {
        /* Suspend until a signal is received */
        pause();

        /* Set structures to 0 to distinguish off/online CPUs */
        memset(&G.st_cpu[current][/*cpu:*/ 1], 0, sizeof(G.st_cpu[0][0]) * G.cpu_nr);

        get_localtime(&G.timestamp[current]);

        /* Read stats */
        if (G.cpu_nr > 1) {
            G.per_cpu_uptime[current] = 0;
            get_uptime(&G.per_cpu_uptime[current]);
        }
        get_cpu_statistics(G.st_cpu[current], &G.global_uptime[current], &G.per_cpu_uptime[current]);

        if (display_opt(D_IRQ_SUM))
            get_irqs_from_stat(G.st_irq[current]);

        if (display_opt(D_IRQ_SUM | D_IRQ_CPU)) {
            int cpu;
            for (cpu = 1; cpu <= G.cpu_nr; cpu++) {
                G.st_irq[current][cpu].irq_nr = 0;
            }
            /* accumulates .irq_nr */
            get_irqs_from_interrupts(PROCFS_INTERRUPTS, G.st_irqcpu,
                    G.irqcpu_nr, current);
        }

        if (display_opt(D_SOFTIRQS))
            get_irqs_from_interrupts(PROCFS_SOFTIRQS,
                    G.st_softirqcpu,
                    G.softirqcpu_nr, current);

        write_stats(current);

        if (G.count > 0) {
            if (--G.count == 0)
                break;
        }

        current ^= 1;
    }

    /* Print average statistics */
    write_stats_avg(current);
}

/* Initialization */

/* Get number of clock ticks per sec */
static ALWAYS_INLINE unsigned get_hz(void)
{
    return sysconf(_SC_CLK_TCK);
}

static void alloc_struct(int cpus)
{
    int i;
    for (i = 0; i < 3; i++) {
        G.st_cpu[i] = xzalloc(sizeof(G.st_cpu[i][0]) * cpus);
        G.st_irq[i] = xzalloc(sizeof(G.st_irq[i][0]) * cpus);
        G.st_irqcpu[i] = xzalloc(sizeof(G.st_irqcpu[i][0]) * cpus * G.irqcpu_nr);
        G.st_softirqcpu[i] = xzalloc(sizeof(G.st_softirqcpu[i][0]) * cpus * G.softirqcpu_nr);
    }
    G.cpu_bitmap_len = (cpus >> 3) + 1;
    G.cpu_bitmap = xzalloc(G.cpu_bitmap_len);
}

static void print_header(struct tm *t)
{
    char cur_date[16];
    struct utsname uts;

    /* Get system name, release number and hostname */
    uname(&uts);

    strftime(cur_date, sizeof(cur_date), "%x", t);

    printf("%s %s (%s)\t%s\t_%s_\t(%u CPU)\n",
        uts.sysname, uts.release, uts.nodename, cur_date, uts.machine, G.cpu_nr);
}
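
/* With the format string above, the banner comes out tab-separated,
 * roughly like this (field values are machine-dependent, shown only
 * for illustration):
 *   Linux 3.2.0-4-686-pae (myhost)  06/30/14  _i686_  (2 CPU)
 */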

/*
 * Get number of interrupts available per processor
 */
static int get_irqcpu_nr(const char *f, int max_irqs)
{
    FILE *fp;
    char *line;
    unsigned linelen;
    unsigned irq;

    fp = fopen_for_read(f);
    if (!fp) /* No interrupts file */
        return 0;

    linelen = INTERRUPTS_LINE + 16 * G.cpu_nr;
    line = xmalloc(linelen);

    irq = 0;
    while (fgets(line, linelen, fp)
     && irq < max_irqs
    ) {
        int p = strcspn(line, ":");
        if ((p > 0) && (p < 16))
            irq++;
    }

    fclose(fp);
    free(line);

    return irq;
}

//usage:#define mpstat_trivial_usage
//usage: "[-A] [-I SUM|CPU|ALL|SCPU] [-u] [-P num|ALL] [INTERVAL [COUNT]]"
//usage:#define mpstat_full_usage "\n\n"
//usage: "Per-processor statistics\n"
//usage: "\n -A Same as -I ALL -u -P ALL"
//usage: "\n -I SUM|CPU|ALL|SCPU Report interrupt statistics"
//usage: "\n -P num|ALL Processor to monitor"
//usage: "\n -u Report CPU utilization"

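/* Typical invocations (illustrative; output follows the headers above):
 *   mpstat             one "all" utilization line covering time since boot
 *   mpstat -P ALL 2 5  per-CPU utilization every 2 seconds, 5 reports,
 *                      followed by an "Average:" summary
 *   mpstat -I SUM 1    overall intr/s once per second until interrupted
 */
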
int mpstat_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE;
int mpstat_main(int UNUSED_PARAM argc, char **argv)
{
    char *opt_irq_fmt;
    char *opt_set_cpu;
    int i, opt;
    enum {
        OPT_ALL = 1 << 0, /* -A */
        OPT_INTS = 1 << 1, /* -I */
        OPT_SETCPU = 1 << 2, /* -P */
        OPT_UTIL = 1 << 3, /* -u */
    };

    /* Don't buffer data if redirected to a pipe */
    setbuf(stdout, NULL);

    INIT_G();

    G.interval = -1;

    /* Get number of processors */
    G.cpu_nr = get_cpu_count();

    /* Get number of clock ticks per sec */
    G.hz = get_hz();

    /* Calculate number of interrupts per processor */
    G.irqcpu_nr = get_irqcpu_nr(PROCFS_INTERRUPTS, NR_IRQS) + NR_IRQCPU_PREALLOC;

    /* Calculate number of soft interrupts per processor */
    G.softirqcpu_nr = get_irqcpu_nr(PROCFS_SOFTIRQS, NR_IRQS) + NR_IRQCPU_PREALLOC;

    /* Allocate space for structures. + 1 for global structure. */
    alloc_struct(G.cpu_nr + 1);

    /* Parse and process arguments */
    opt = getopt32(argv, "AI:P:u", &opt_irq_fmt, &opt_set_cpu);
    argv += optind;

    if (*argv) {
        /* Get interval */
        G.interval = xatoi_positive(*argv);
        G.count = -1;
        argv++;
        if (*argv) {
            /* Get count value */
            if (G.interval == 0)
                bb_show_usage();
            G.count = xatoi_positive(*argv);
            //if (*++argv)
            // bb_show_usage();
        }
    }
    if (G.interval < 0)
        G.interval = 0;

    if (opt & OPT_ALL) {
        G.p_option = 1;
        G.options |= D_CPU + D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS;
        /* Select every CPU */
        memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
    }

    if (opt & OPT_INTS) {
        static const char v[] = {
            D_IRQ_CPU, D_IRQ_SUM, D_SOFTIRQS,
            D_IRQ_SUM + D_IRQ_CPU + D_SOFTIRQS
        };
        i = index_in_strings("CPU\0SUM\0SCPU\0ALL\0", opt_irq_fmt);
        if (i == -1)
            bb_show_usage();
        G.options |= v[i];
    }

    if ((opt & OPT_UTIL) /* -u? */
     || G.options == 0 /* nothing? (use default then) */
    ) {
        G.options |= D_CPU;
    }

    if (opt & OPT_SETCPU) {
        char *t;
        G.p_option = 1;

        for (t = strtok(opt_set_cpu, ","); t; t = strtok(NULL, ",")) {
            if (strcmp(t, "ALL") == 0) {
                /* Select every CPU */
                memset(G.cpu_bitmap, 0xff, G.cpu_bitmap_len);
            } else {
                /* Get CPU number */
                unsigned n = xatoi_positive(t);
                if (n >= G.cpu_nr)
                    bb_error_msg_and_die("not that many processors");
                n++;
                G.cpu_bitmap[n >> 3] |= 1 << (n & 7);
            }
        }
    }

    if (!G.p_option)
        /* Display global stats */
        G.cpu_bitmap[0] = 1;

    /* Get time */
    get_localtime(&G.timestamp[0]);

    /* Display header */
    print_header(&G.timestamp[0]);

    /* The main loop */
    main_loop();

    if (ENABLE_FEATURE_CLEAN_UP) {
        /* Clean up */
        for (i = 0; i < 3; i++) {
            free(G.st_cpu[i]);
            free(G.st_irq[i]);
            free(G.st_irqcpu[i]);
            free(G.st_softirqcpu[i]);
        }
        free(G.cpu_bitmap);
        free(&G);
    }

    return EXIT_SUCCESS;
}