linux/include/linux/kref.h
/*
 * kref.h - library routines for handling generic reference counted objects
 *
 * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
 * Copyright (C) 2004 IBM Corp.
 *
 * based on kobject.h which was:
 * Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
 * Copyright (C) 2002-2003 Open Source Development Labs
 *
 * This file is released under the GPLv2.
 *
 */

#ifndef _KREF_H_
#define _KREF_H_

#include <linux/bug.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct kref {
	atomic_t refcount;
};

/**
 * kref_init - initialize object.
 * @kref: object in question.
 */
static inline void kref_init(struct kref *kref)
{
	atomic_set(&kref->refcount, 1);
}

/**
 * kref_get - increment refcount for object.
 * @kref: object.
 */
static inline void kref_get(struct kref *kref)
{
	WARN_ON(!atomic_read(&kref->refcount));
	atomic_inc(&kref->refcount);
}

/**
 * kref_sub - subtract a number of refcounts for object.
 * @kref: object.
 * @count: Number of refcounts to subtract.
 * @release: pointer to the function that will clean up the object when the
 *           last reference to the object is released.
 *           This pointer is required, and it is not acceptable to pass kfree
 *           in as this function.  If the caller does pass kfree to this
 *           function, you will be publicly mocked mercilessly by the kref
 *           maintainer, and anyone else who happens to notice it.  You have
 *           been warned.
 *
 * Subtract @count from the refcount, and if 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware, if this
 * function returns 0, you still cannot count on the kref remaining in
 * memory.  Only use the return value to check whether the kref is now gone,
 * never to conclude that it is still present.
 */
static inline int kref_sub(struct kref *kref, unsigned int count,
	     void (*release)(struct kref *kref))
{
	WARN_ON(release == NULL);

	if (atomic_sub_and_test((int) count, &kref->refcount)) {
		release(kref);
		return 1;
	}
	return 0;
}

/**
 * kref_put - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *           last reference to the object is released.
 *           This pointer is required, and it is not acceptable to pass kfree
 *           in as this function.  If the caller does pass kfree to this
 *           function, you will be publicly mocked mercilessly by the kref
 *           maintainer, and anyone else who happens to notice it.  You have
 *           been warned.
 *
 * Decrement the refcount, and if 0, call release().
 * Return 1 if the object was removed, otherwise return 0.  Beware, if this
 * function returns 0, you still cannot count on the kref remaining in
 * memory.  Only use the return value to check whether the kref is now gone,
 * never to conclude that it is still present.
 */
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
	return kref_sub(kref, 1, release);
}
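
/*
 * Example usage (an illustrative sketch, not part of the original header):
 * embed a struct kref in an object and free the object through a release
 * callback that recovers the container with container_of().  The names
 * struct my_data, my_data_release() and my_data_alloc() are hypothetical,
 * and kmalloc()/kfree() would need <linux/slab.h>.
 *
 *	struct my_data {
 *		int value;
 *		struct kref refcount;
 *	};
 *
 *	static void my_data_release(struct kref *kref)
 *	{
 *		struct my_data *data =
 *			container_of(kref, struct my_data, refcount);
 *
 *		kfree(data);	// free here; never pass kfree as @release
 *	}
 *
 *	static struct my_data *my_data_alloc(void)
 *	{
 *		struct my_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
 *
 *		if (data)
 *			kref_init(&data->refcount);	// count starts at 1
 *		return data;
 *	}
 *
 *	// Additional users call kref_get(&data->refcount); every holder
 *	// eventually calls kref_put(&data->refcount, my_data_release).
 */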

/**
 * kref_put_spinlock_irqsave - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *           last reference to the object is released.
 *           This pointer is required, and it is not acceptable to pass kfree
 *           in as this function.
 * @lock: lock to take in release case
 *
 * Behaves identically to kref_put() with one exception.  If the reference
 * count drops to zero, the lock is taken atomically with respect to dropping
 * the reference count.  The release function is called with @lock held and
 * must call spin_unlock() without _irqrestore; the interrupt flags are
 * restored here after @release returns.
 */
static inline int kref_put_spinlock_irqsave(struct kref *kref,
		void (*release)(struct kref *kref),
		spinlock_t *lock)
{
	unsigned long flags;

	WARN_ON(release == NULL);
	/* Fast path: drop a reference that provably is not the last one. */
	if (atomic_add_unless(&kref->refcount, -1, 1))
		return 0;
	spin_lock_irqsave(lock, flags);
	if (atomic_dec_and_test(&kref->refcount)) {
		release(kref);	/* @release must spin_unlock(@lock) */
		local_irq_restore(flags);
		return 1;
	}
	spin_unlock_irqrestore(lock, flags);
	return 0;
}
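
/*
 * Example release callback for kref_put_spinlock_irqsave() (an illustrative
 * sketch; struct my_obj, my_obj_lock and my_obj_release() are hypothetical).
 * Note the plain spin_unlock(): the _irqrestore half is performed by
 * kref_put_spinlock_irqsave() itself after the callback returns.
 *
 *	static void my_obj_release(struct kref *kref)
 *	{
 *		struct my_obj *obj =
 *			container_of(kref, struct my_obj, refcount);
 *
 *		list_del(&obj->node);	// still protected by my_obj_lock
 *		spin_unlock(&my_obj_lock);	// no _irqrestore here
 *		kfree(obj);
 *	}
 *
 *	// caller side:
 *	//	kref_put_spinlock_irqsave(&obj->refcount, my_obj_release,
 *	//				  &my_obj_lock);
 */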
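
/**
 * kref_put_mutex - decrement refcount for object.
 * @kref: object.
 * @release: pointer to the function that will clean up the object when the
 *           last reference to the object is released.
 *           This pointer is required, and it is not acceptable to pass kfree
 *           in as this function.
 * @lock: mutex to take in release case
 *
 * Behaves identically to kref_put() with one exception.  If the reference
 * count drops to zero, @lock is taken atomically with respect to dropping
 * the reference count.  The release function is called with @lock held and
 * must call mutex_unlock(@lock) itself.
 */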
static inline int kref_put_mutex(struct kref *kref,
				 void (*release)(struct kref *kref),
				 struct mutex *lock)
{
	WARN_ON(release == NULL);
	if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
		mutex_lock(lock);
		if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
			mutex_unlock(lock);
			return 0;
		}
		release(kref);
		return 1;
	}
	return 0;
}
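
/*
 * Example usage of kref_put_mutex() (an illustrative sketch; struct my_obj,
 * my_obj_mutex and my_obj_release() are hypothetical).  The release callback
 * runs with the mutex held and is responsible for unlocking it:
 *
 *	static void my_obj_release(struct kref *kref)
 *	{
 *		struct my_obj *obj =
 *			container_of(kref, struct my_obj, refcount);
 *
 *		list_del(&obj->node);		// remove under my_obj_mutex
 *		mutex_unlock(&my_obj_mutex);	// callback must drop the lock
 *		kfree(obj);
 *	}
 *
 *	// caller side:
 *	//	kref_put_mutex(&obj->refcount, my_obj_release, &my_obj_mutex);
 */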

/**
 * kref_get_unless_zero - Increment refcount for object unless it is zero.
 * @kref: object.
 *
 * Return non-zero if the increment succeeded.  Otherwise return 0.
 *
 * This function is intended to simplify locking around refcounting for
 * objects that can be looked up from a lookup structure, and which are
 * removed from that lookup structure in the object destructor.
 * Operations on such objects require at least a read lock around
 * lookup + kref_get, and a write lock around kref_put + remove from lookup
 * structure.  Furthermore, RCU implementations become extremely tricky.
 * With a lookup followed by a kref_get_unless_zero *with return value check*,
 * locking in the kref_put path can be deferred to the actual removal from
 * the lookup structure, and RCU lookups become trivial.
 */
static inline int __must_check kref_get_unless_zero(struct kref *kref)
{
	return atomic_add_unless(&kref->refcount, 1, 0);
}
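
/*
 * Example lookup pattern for kref_get_unless_zero() (an illustrative
 * sketch; my_obj_lock, my_obj_list and struct my_obj are hypothetical).
 * The return value check is what makes the pattern safe: an object whose
 * refcount has already hit zero is skipped rather than resurrected.
 *
 *	struct my_obj *my_obj_find(int id)
 *	{
 *		struct my_obj *obj;
 *
 *		spin_lock(&my_obj_lock);
 *		list_for_each_entry(obj, &my_obj_list, node) {
 *			if (obj->id == id &&
 *			    kref_get_unless_zero(&obj->refcount)) {
 *				spin_unlock(&my_obj_lock);
 *				return obj;	// caller now owns a reference
 *			}
 *		}
 *		spin_unlock(&my_obj_lock);
 *		return NULL;
 *	}
 */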
#endif /* _KREF_H_ */