linux/fs/btrfs/locking.c
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
}
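
/*
 * Illustrative sketch, not from the original file: a writer that is about
 * to do something that might sleep demotes its spinning lock first so that
 * waiters sleep on the waitqueue instead of spinning behind us.  Note that
 * btrfs_tree_unlock() below handles both flavours, so no conversion back
 * is needed just to unlock.  The function name is hypothetical.
 */
static void __maybe_unused example_write_then_sleep(struct extent_buffer *eb)
{
        btrfs_tree_lock(eb);            /* spinning write lock */
        btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
        /* ... sleepable work here; the rwlock itself is no longer held ... */
        btrfs_tree_unlock(eb);          /* drops the blocking write lock */
}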

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                if (atomic_dec_and_test(&eb->blocking_writers) &&
                    waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                if (atomic_dec_and_test(&eb->blocking_readers) &&
                    waitqueue_active(&eb->read_lock_wq))
                        wake_up(&eb->read_lock_wq);
        }
}
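
/*
 * Illustrative sketch, not from the original file: a reader that must
 * block mid-way converts to the blocking flavour and back, keeping its
 * logical read lock the whole time.  The function name is hypothetical.
 */
static void __maybe_unused example_read_block_resume(struct extent_buffer *eb)
{
        btrfs_tree_read_lock(eb);
        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
        /* ... sleepable work; only the blocking-reader count is held ... */
        btrfs_clear_lock_blocking_rw(eb, BTRFS_READ_LOCK_BLOCKING);
        /* ... back in a spinning section ... */
        btrfs_tree_read_unlock(eb);
}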

/*
 * take a spinning read lock.  This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        read_unlock(&eb->lock);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}
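
/*
 * Illustrative sketch, not from the original file: the plain read lock is
 * a spinning lock, so the critical section must not sleep.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_plain_read(struct extent_buffer *eb)
{
        btrfs_tree_read_lock(eb);       /* waits out blocking writers */
        /* ... examine the extent buffer, no sleeping ... */
        btrfs_tree_read_unlock(eb);
}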

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}
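
/*
 * Illustrative sketch, not from the original file: the try variant lets a
 * caller back off instead of waiting, e.g. to avoid deadlock while other
 * locks are already held.  The function name is hypothetical.
 */
static int __maybe_unused example_try_read(struct extent_buffer *eb)
{
        if (!btrfs_try_tree_read_lock(eb))
                return -EAGAIN;         /* caller can retry later */
        /* ... short, non-sleeping read section ... */
        btrfs_tree_read_unlock(eb);
        return 0;
}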

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}
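
/*
 * Illustrative sketch, not from the original file: try the fast path
 * first, then fall back to the full lock, which may wait.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_try_write(struct extent_buffer *eb)
{
        if (!btrfs_try_tree_write_lock(eb))
                btrfs_tree_lock(eb);    /* slow path: waits for blockers */
        /* ... modify the extent buffer ... */
        btrfs_tree_unlock(eb);
}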

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        eb->lock_nested = 0;
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        if (eb->lock_nested) {
                read_lock(&eb->lock);
                if (eb->lock_nested && current->pid == eb->lock_owner) {
                        eb->lock_nested = 0;
                        read_unlock(&eb->lock);
                        return;
                }
                read_unlock(&eb->lock);
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        if (atomic_dec_and_test(&eb->blocking_readers) &&
            waitqueue_active(&eb->read_lock_wq))
                wake_up(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}
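
/*
 * Illustrative sketch, not from the original file: a read lock that was
 * made blocking can be dropped directly with the _blocking variant; there
 * is no need to reacquire the spinlock first.  The function name is
 * hypothetical.
 */
static void __maybe_unused example_drop_blocking_read(struct extent_buffer *eb)
{
        btrfs_tree_read_lock(eb);
        btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
        /* ... sleepable work ... */
        btrfs_tree_read_unlock_blocking(eb);    /* no read_lock() taken */
}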

/*
 * take a spinning write lock.  This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}
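
/*
 * Illustrative sketch, not from the original file: btrfs_tree_lock()
 * records lock_owner, so the same thread may later take a read lock on a
 * buffer whose write lock it has made blocking; that is exactly the
 * lock_nested case handled in btrfs_tree_read_lock() above.  The function
 * name is hypothetical.
 */
static void __maybe_unused example_nested_read_under_write(struct extent_buffer *eb)
{
        btrfs_tree_lock(eb);
        btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
        btrfs_tree_read_lock(eb);       /* same pid as lock_owner: allowed */
        btrfs_tree_read_unlock(eb);     /* clears lock_nested */
        btrfs_tree_unlock(eb);          /* drops the blocking write lock */
}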

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        atomic_dec(&eb->write_locks);

        if (blockers) {
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                smp_mb();
                if (waitqueue_active(&eb->write_lock_wq))
                        wake_up(&eb->write_lock_wq);
        } else {
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}

void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}