linux/tools/perf/bench/futex-lock-pi.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2015 Davidlohr Bueso.
   4 */
   5
   6/* For the CLR_() macros */
   7#include <string.h>
   8#include <pthread.h>
   9
  10#include <signal.h>
  11#include "../util/stat.h"
  12#include <subcmd/parse-options.h>
  13#include <linux/compiler.h>
  14#include <linux/kernel.h>
  15#include <linux/zalloc.h>
  16#include <errno.h>
  17#include <perf/cpumap.h>
  18#include "bench.h"
  19#include "futex.h"
  20
  21#include <err.h>
  22#include <stdlib.h>
  23#include <sys/time.h>
  24
/* Per-thread bookkeeping for one benchmark worker. */
struct worker {
        int tid;                /* logical thread id, 0..nthreads-1 */
        u_int32_t *futex;       /* lock word: &global_futex, or a private one when --multi */
        pthread_t thread;
        unsigned long ops;      /* lock/unlock pairs completed by this thread */
};
  31
static u_int32_t global_futex = 0;      /* the shared PI lock word when !multi */
static struct worker *worker;           /* array of nthreads worker slots */
static unsigned int nsecs = 10;         /* measurement window, seconds (-r) */
static bool silent = false, multi = false;      /* -s: quiet; -M: one futex per thread */
static bool done = false, fshared = false;      /* done: stop flag set by signal/timer; -S: shared futexes */
static unsigned int nthreads = 0;       /* 0 means "one per online CPU" */
static int futex_flag = 0;              /* FUTEX_PRIVATE_FLAG unless fshared */
static pthread_mutex_t thread_lock;
static unsigned int threads_starting;   /* countdown used for the start barrier */
static struct stats throughput_stats;   /* per-thread ops/sec samples */
static pthread_cond_t thread_parent, thread_worker;     /* start-barrier condvars */
  43
/* Command-line options for 'perf bench futex lock-pi'. */
static const struct option options[] = {
        OPT_UINTEGER('t', "threads",  &nthreads, "Specify amount of threads"),
        OPT_UINTEGER('r', "runtime", &nsecs,     "Specify runtime (in seconds)"),
        OPT_BOOLEAN( 'M', "multi",   &multi,     "Use multiple futexes"),
        OPT_BOOLEAN( 's', "silent",  &silent,    "Silent mode: do not display data/details"),
        OPT_BOOLEAN( 'S', "shared",  &fshared,   "Use shared futexes instead of private ones"),
        OPT_END()
};
  52
/* Usage string shown by parse_options() on error or --help. */
static const char * const bench_futex_lock_pi_usage[] = {
        "perf bench futex lock-pi <options>",
        NULL
};
  57
  58static void print_summary(void)
  59{
  60        unsigned long avg = avg_stats(&throughput_stats);
  61        double stddev = stddev_stats(&throughput_stats);
  62
  63        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
  64               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
  65               (int)bench__runtime.tv_sec);
  66}
  67
/*
 * SIGINT handler (also called directly when the runtime elapses):
 * tell all workers to stop and record the effective runtime.
 */
static void toggle_done(int sig __maybe_unused,
                        siginfo_t *info __maybe_unused,
                        void *uc __maybe_unused)
{
        /* inform all threads that we're done for the day */
        done = true;
        /* capture end time so throughput uses the actual elapsed window */
        gettimeofday(&bench__end, NULL);
        timersub(&bench__end, &bench__start, &bench__runtime);
}
  77
  78static void *workerfn(void *arg)
  79{
  80        struct worker *w = (struct worker *) arg;
  81        unsigned long ops = w->ops;
  82
  83        pthread_mutex_lock(&thread_lock);
  84        threads_starting--;
  85        if (!threads_starting)
  86                pthread_cond_signal(&thread_parent);
  87        pthread_cond_wait(&thread_worker, &thread_lock);
  88        pthread_mutex_unlock(&thread_lock);
  89
  90        do {
  91                int ret;
  92        again:
  93                ret = futex_lock_pi(w->futex, NULL, futex_flag);
  94
  95                if (ret) { /* handle lock acquisition */
  96                        if (!silent)
  97                                warn("thread %d: Could not lock pi-lock for %p (%d)",
  98                                     w->tid, w->futex, ret);
  99                        if (done)
 100                                break;
 101
 102                        goto again;
 103                }
 104
 105                usleep(1);
 106                ret = futex_unlock_pi(w->futex, futex_flag);
 107                if (ret && !silent)
 108                        warn("thread %d: Could not unlock pi-lock for %p (%d)",
 109                             w->tid, w->futex, ret);
 110                ops++; /* account for thread's share of work */
 111        }  while (!done);
 112
 113        w->ops = ops;
 114        return NULL;
 115}
 116
 117static void create_threads(struct worker *w, pthread_attr_t thread_attr,
 118                           struct perf_cpu_map *cpu)
 119{
 120        cpu_set_t cpuset;
 121        unsigned int i;
 122
 123        threads_starting = nthreads;
 124
 125        for (i = 0; i < nthreads; i++) {
 126                worker[i].tid = i;
 127
 128                if (multi) {
 129                        worker[i].futex = calloc(1, sizeof(u_int32_t));
 130                        if (!worker[i].futex)
 131                                err(EXIT_FAILURE, "calloc");
 132                } else
 133                        worker[i].futex = &global_futex;
 134
 135                CPU_ZERO(&cpuset);
 136                CPU_SET(cpu->map[i % cpu->nr], &cpuset);
 137
 138                if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
 139                        err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
 140
 141                if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
 142                        err(EXIT_FAILURE, "pthread_create");
 143        }
 144}
 145
 146int bench_futex_lock_pi(int argc, const char **argv)
 147{
 148        int ret = 0;
 149        unsigned int i;
 150        struct sigaction act;
 151        pthread_attr_t thread_attr;
 152        struct perf_cpu_map *cpu;
 153
 154        argc = parse_options(argc, argv, options, bench_futex_lock_pi_usage, 0);
 155        if (argc)
 156                goto err;
 157
 158        cpu = perf_cpu_map__new(NULL);
 159        if (!cpu)
 160                err(EXIT_FAILURE, "calloc");
 161
 162        memset(&act, 0, sizeof(act));
 163        sigfillset(&act.sa_mask);
 164        act.sa_sigaction = toggle_done;
 165        sigaction(SIGINT, &act, NULL);
 166
 167        if (!nthreads)
 168                nthreads = cpu->nr;
 169
 170        worker = calloc(nthreads, sizeof(*worker));
 171        if (!worker)
 172                err(EXIT_FAILURE, "calloc");
 173
 174        if (!fshared)
 175                futex_flag = FUTEX_PRIVATE_FLAG;
 176
 177        printf("Run summary [PID %d]: %d threads doing pi lock/unlock pairing for %d secs.\n\n",
 178               getpid(), nthreads, nsecs);
 179
 180        init_stats(&throughput_stats);
 181        pthread_mutex_init(&thread_lock, NULL);
 182        pthread_cond_init(&thread_parent, NULL);
 183        pthread_cond_init(&thread_worker, NULL);
 184
 185        threads_starting = nthreads;
 186        pthread_attr_init(&thread_attr);
 187        gettimeofday(&bench__start, NULL);
 188
 189        create_threads(worker, thread_attr, cpu);
 190        pthread_attr_destroy(&thread_attr);
 191
 192        pthread_mutex_lock(&thread_lock);
 193        while (threads_starting)
 194                pthread_cond_wait(&thread_parent, &thread_lock);
 195        pthread_cond_broadcast(&thread_worker);
 196        pthread_mutex_unlock(&thread_lock);
 197
 198        sleep(nsecs);
 199        toggle_done(0, NULL, NULL);
 200
 201        for (i = 0; i < nthreads; i++) {
 202                ret = pthread_join(worker[i].thread, NULL);
 203                if (ret)
 204                        err(EXIT_FAILURE, "pthread_join");
 205        }
 206
 207        /* cleanup & report results */
 208        pthread_cond_destroy(&thread_parent);
 209        pthread_cond_destroy(&thread_worker);
 210        pthread_mutex_destroy(&thread_lock);
 211
 212        for (i = 0; i < nthreads; i++) {
 213                unsigned long t = bench__runtime.tv_sec > 0 ?
 214                        worker[i].ops / bench__runtime.tv_sec : 0;
 215
 216                update_stats(&throughput_stats, t);
 217                if (!silent)
 218                        printf("[thread %3d] futex: %p [ %ld ops/sec ]\n",
 219                               worker[i].tid, worker[i].futex, t);
 220
 221                if (multi)
 222                        zfree(&worker[i].futex);
 223        }
 224
 225        print_summary();
 226
 227        free(worker);
 228        return ret;
 229err:
 230        usage_with_options(bench_futex_lock_pi_usage, options);
 231        exit(EXIT_FAILURE);
 232}
 233
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.