        The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree, don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-the-tree code are supposed to
be able to use diff(1).
        Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
        int (*d_revalidate)(struct dentry *, unsigned int);
        int (*d_hash)(const struct dentry *, const struct inode *,
                        struct qstr *);
        int (*d_compare)(const struct dentry *, const struct inode *,
                        const struct dentry *, const struct inode *,
                        unsigned int, const char *, const struct qstr *);
        int (*d_delete)(struct dentry *);
        void (*d_release)(struct dentry *);
        void (*d_prune)(struct dentry *);
        void (*d_iput)(struct dentry *, struct inode *);
        char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
        struct vfsmount *(*d_automount)(struct path *path);
        int (*d_manage)(struct dentry *, bool);

locking rules:
                rename_lock     ->d_lock        may block       rcu-walk
d_revalidate:   no              no              yes (ref-walk)  maybe
d_hash:         no              no              no              maybe
d_compare:      yes             no              no              maybe
d_delete:       no              yes             no              no
d_release:      no              no              yes             no
d_prune:        no              yes             no              no
d_iput:         no              no              yes             no
d_dname:        no              no              no              no
d_automount:    no              no              yes             no
d_manage:       no              no              yes (ref-walk)  maybe

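As an illustration of the rcu-walk column: an instance that may need to block
is expected to return -ECHILD when called in rcu-walk mode, so the VFS falls
back to ref-walk.  A minimal sketch, not from the tree - the myfs_* helper is
a made-up placeholder:

        static int myfs_d_revalidate(struct dentry *dentry, unsigned int flags)
        {
                if (flags & LOOKUP_RCU)
                        return -ECHILD; /* we may block; ask for ref-walk */

                /* ref-walk: blocking (network round trips, etc.) is fine here */
                if (myfs_dentry_still_valid(dentry))    /* hypothetical helper */
                        return 1;
                return 0;
        }
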
--------------------------- inode_operations ---------------------------
prototypes:
        int (*create) (struct inode *,struct dentry *,umode_t, bool);
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
        int (*link) (struct dentry *,struct inode *,struct dentry *);
        int (*unlink) (struct inode *,struct dentry *);
        int (*symlink) (struct inode *,struct dentry *,const char *);
        int (*mkdir) (struct inode *,struct dentry *,umode_t);
        int (*rmdir) (struct inode *,struct dentry *);
        int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
        int (*rename) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *);
        int (*readlink) (struct dentry *, char __user *,int);
        void * (*follow_link) (struct dentry *, struct nameidata *);
        void (*put_link) (struct dentry *, struct nameidata *, void *);
        void (*truncate) (struct inode *);
        int (*permission) (struct inode *, int, unsigned int);
        int (*get_acl)(struct inode *, int);
        int (*setattr) (struct dentry *, struct iattr *);
        int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
        int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
        void (*update_time)(struct inode *, struct timespec *, int);
        int (*atomic_open)(struct inode *, struct dentry *,
                                struct file *, unsigned open_flag,
                                umode_t create_mode, int *opened);

locking rules:
        all may block
                i_mutex(inode)
lookup:         yes
create:         yes
link:           yes (both)
mknod:          yes
symlink:        yes
mkdir:          yes
unlink:         yes (both)
rmdir:          yes (both)      (see below)
rename:         yes (all)       (see below)
readlink:       no
follow_link:    no
put_link:       no
truncate:       yes             (see below)
setattr:        yes
permission:     no (may not block if called in rcu-walk mode)
get_acl:        no
getattr:        no
setxattr:       yes
getxattr:       no
listxattr:      no
removexattr:    yes
fiemap:         no
update_time:    no
atomic_open:    yes

        Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.
        cross-directory ->rename() has (per-superblock) ->s_vfs_rename_mutex.
        ->truncate() is never called directly - it's a callback, not a
method. It is called by vmtruncate() - a deprecated library function used by
->setattr(). The locking information above applies to that call (i.e. it is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE has been
passed).
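
For the ATTR_SIZE case, the modern replacement for vmtruncate() is to validate
the attributes and shrink the pagecache directly.  A rough sketch of such a
->setattr() (filesystem-specific block freeing and error handling omitted):

        static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
        {
                struct inode *inode = dentry->d_inode;
                int error;

                /* i_mutex is held by the caller, per the table above */
                error = inode_change_ok(inode, attr);
                if (error)
                        return error;

                if ((attr->ia_valid & ATTR_SIZE) &&
                    attr->ia_size != i_size_read(inode))
                        truncate_setsize(inode, attr->ia_size);

                setattr_copy(inode, attr);
                mark_inode_dirty(inode);
                return 0;
        }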

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.

--------------------------- super_operations ---------------------------
prototypes:
        struct inode *(*alloc_inode)(struct super_block *sb);
        void (*destroy_inode)(struct inode *);
        void (*dirty_inode) (struct inode *, int flags);
        int (*write_inode) (struct inode *, struct writeback_control *wbc);
        int (*drop_inode) (struct inode *);
        void (*evict_inode) (struct inode *);
        void (*put_super) (struct super_block *);
        int (*sync_fs)(struct super_block *sb, int wait);
        int (*freeze_fs) (struct super_block *);
        int (*unfreeze_fs) (struct super_block *);
        int (*statfs) (struct dentry *, struct kstatfs *);
        int (*remount_fs) (struct super_block *, int *, char *);
        void (*umount_begin) (struct super_block *);
        int (*show_options)(struct seq_file *, struct dentry *);
        ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
        ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
        int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);

locking rules:
        All may block [not true, see below]
                        s_umount
alloc_inode:
destroy_inode:
dirty_inode:
write_inode:
drop_inode:                             !!!inode->i_lock!!!
evict_inode:
put_super:              write
sync_fs:                read
freeze_fs:              write
unfreeze_fs:            write
statfs:                 maybe(read)     (see below)
remount_fs:             write
umount_begin:           no
show_options:           no              (namespace_sem)
quota_read:             no              (see below)
quota_write:            no              (see below)
bdev_try_to_free_page:  no              (see below)

->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
the superblock down when we only have a dev_t given to us by userland to
identify the superblock.  Everything else (statfs(), fstatfs(), etc.)
doesn't hold it when calling ->statfs() - the superblock is pinned down
by resolving the pathname passed to the syscall.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw something up and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode.  See there for more details.
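
Note the ->drop_inode() entry in the table above: it is called with
inode->i_lock held and must not block.  A sketch of an instance that just
defers to one of the generic helpers (this is only an illustration; many
filesystems leave the method NULL):

        static int myfs_drop_inode(struct inode *inode)
        {
                /* inode->i_lock is held; no sleeping allowed */
                return generic_delete_inode(inode);
        }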

--------------------------- file_system_type ---------------------------
prototypes:
        int (*get_sb) (struct file_system_type *, int,
                       const char *, void *, struct vfsmount *);
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
        void (*kill_sb) (struct super_block *);
locking rules:
                may block
mount           yes
kill_sb         yes

->mount() returns ERR_PTR or the root dentry; its superblock should be locked
on return.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
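
For block-device based filesystems the usual way to satisfy these rules is to
go through the generic helpers, which handle s_umount themselves.  A sketch
(myfs_fill_super is the filesystem's own superblock setup, not shown):

        static struct dentry *myfs_mount(struct file_system_type *fs_type,
                                         int flags, const char *dev_name,
                                         void *data)
        {
                return mount_bdev(fs_type, flags, dev_name, data,
                                  myfs_fill_super);
        }

        static struct file_system_type myfs_fs_type = {
                .owner    = THIS_MODULE,
                .name     = "myfs",
                .mount    = myfs_mount,
                .kill_sb  = kill_block_super,
                .fs_flags = FS_REQUIRES_DEV,
        };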

--------------------------- address_space_operations --------------------------
prototypes:
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
        int (*sync_page)(struct page *);
        int (*writepages)(struct address_space *, struct writeback_control *);
        int (*set_page_dirty)(struct page *page);
        int (*readpages)(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages);
        int (*write_begin)(struct file *, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata);
        int (*write_end)(struct file *, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned copied,
                                struct page *page, void *fsdata);
        sector_t (*bmap)(struct address_space *, sector_t);
        int (*invalidatepage) (struct page *, unsigned long);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
        int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs);
        int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
                                unsigned long *);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        int (*launder_page)(struct page *);
        int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
        int (*error_remove_page)(struct address_space *, struct page *);
        int (*swap_activate)(struct file *);
        int (*swap_deactivate)(struct file *);

locking rules:
        All except set_page_dirty and freepage may block

                        PageLocked(page)        i_mutex
writepage:              yes, unlocks (see below)
readpage:               yes, unlocks
sync_page:              maybe
writepages:
set_page_dirty          no
readpages:
write_begin:            locks the page          yes
write_end:              yes, unlocks            yes
bmap:
invalidatepage:         yes
releasepage:            yes
freepage:               yes
direct_IO:
get_xip_mem:                                    maybe
migratepage:            yes (both)
launder_page:           yes
is_partially_uptodate:  yes
error_remove_page:      yes
swap_activate:          no
swap_deactivate:        no

        ->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

        ->readpage() unlocks the page, either synchronously or via I/O
completion.
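
For filesystems using the buffer layer the generic helper already follows that
rule - block_read_full_page() unlocks the page on I/O completion (or on error).
A typical instance, sketched here with myfs_get_block standing in for the
filesystem's own get_block_t:

        static int myfs_readpage(struct file *file, struct page *page)
        {
                /* page comes in locked; it is unlocked when I/O completes */
                return block_read_full_page(page, myfs_get_block);
        }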

        ->readpages() populates the pagecache with the passed pages and starts
I/O against them.  They come unlocked upon I/O completion.

        ->writepage() is used for two purposes: for "memory cleansing" and for
"sync".  These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible.  So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page, the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special AOP_WRITEPAGE_ACTIVATE
value. AOP_WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list, hence
the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it.  Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete.  If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked.  Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree.  This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.
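
Putting the rules above together, a ->writepage() skeleton looks roughly like
the sketch below.  The myfs_* helpers are made-up placeholders; real
filesystems usually let block_write_full_page() or similar generic code handle
all of this:

        static int myfs_writepage(struct page *page, struct writeback_control *wbc)
        {
                if (myfs_io_in_progress(page)) {        /* hypothetical */
                        if (wbc->sync_mode == WB_SYNC_NONE) {
                                /* cleansing call - don't block, try later */
                                redirty_page_for_writepage(wbc, page);
                                unlock_page(page);
                                return 0;
                        }
                        myfs_wait_for_io(page);         /* hypothetical */
                }

                set_page_writeback(page);
                unlock_page(page);
                /* the completion handler must call end_page_writeback(page) */
                myfs_submit_write(page);                /* hypothetical */
                return 0;
        }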

        ->sync_page() locking rules are not well-defined - usually it is called
with the page locked, but that is not guaranteed. Considering the currently
existing instances of this method, ->sync_page() itself doesn't look
well-defined...

        ->writepages() is used for periodic writeback and for syscall-initiated
sync operations.  The address_space should start I/O against at least
*nr_to_write pages.  *nr_to_write must be decremented for each page which is
written.  The address_space implementation may write more (or fewer) pages
than *nr_to_write asks for, but it should try to be reasonably close.  If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.
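
Filesystems that need nothing special typically just hand the whole thing to
the generic code, which honours wbc->nr_to_write for them.  A sketch, again
with myfs_get_block standing in for the filesystem's get_block_t:

        static int myfs_writepages(struct address_space *mapping,
                                   struct writeback_control *wbc)
        {
                return mpage_writepages(mapping, wbc, myfs_get_block);
        }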

        ->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback.  It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.
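
Because of that, an instance must not take sleeping locks.  Filesystems that
don't attach buffers to the page typically just use the non-blocking generic
helper; a minimal sketch:

        static int myfs_set_page_dirty(struct page *page)
        {
                /* may be called under spinlock - no sleeping allowed */
                return __set_page_dirty_nobuffers(page);
        }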

        ->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away.  Please,
keep it that way and don't breed new callers.

        ->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated.  It
returns zero on success.  If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.

        ->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it.  It returns zero to
indicate that the buffers are (or may be) freeable.  If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

        ->freepage() is called when the kernel is done dropping the page
from the page cache.

        ->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

        ->swap_activate will be called with a non-zero argument on
files backing (non block device backed) swapfiles. A return value
of zero indicates success, in which case this file can be used for
backing swapspace. The swapspace operations will be proxied to the
address space operations.

        ->swap_deactivate() will be called in the sys_swapoff()
path after ->swap_activate() returned success.

----------------------- file_lock_operations ------------------------------
prototypes:
        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);

locking rules:
                        file_lock_lock  may block
fl_copy_lock:           yes             no
fl_release_private:     maybe           no

----------------------- lock_manager_operations ---------------------------
prototypes:
        int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
        void (*lm_notify)(struct file_lock *);  /* unblock callback */
        int (*lm_grant)(struct file_lock *, struct file_lock *, int);
        void (*lm_break)(struct file_lock *); /* break_lease callback */
        int (*lm_change)(struct file_lock **, int);

locking rules:
                        file_lock_lock  may block
lm_compare_owner:       yes             no
lm_notify:              yes             no
lm_grant:               no              no
lm_break:               yes             no
lm_change:              yes             no

--------------------------- buffer_head -----------------------------------
prototypes:
        void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
        called from interrupts. In other words, extreme care is needed here.
bh is locked, but that's the only guarantee we have here. Currently only RAID1,
highmem, fs/buffer.c, and fs/ntfs/aops.c provide these. Block devices
call this method upon I/O completion.
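
So an instance has to be written like an interrupt handler - no sleeping, no
heavy locks.  A minimal sketch along the lines of end_buffer_read_sync():

        static void myfs_end_buffer_io(struct buffer_head *bh, int uptodate)
        {
                /* may run in interrupt context; bh is locked on entry */
                if (uptodate)
                        set_buffer_uptodate(bh);
                else
                        clear_buffer_uptodate(bh);
                unlock_buffer(bh);
        }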

--------------------------- block_device_operations -----------------------
prototypes:
        int (*open) (struct block_device *, fmode_t);
        int (*release) (struct gendisk *, fmode_t);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t, void **, unsigned long *);
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:
                        bd_mutex
open:                   yes
release:                yes
ioctl:                  no
compat_ioctl:           no
direct_access:          no
media_changed:          no
unlock_native_capacity: no
revalidate_disk:        no
getgeo:                 no
swap_slot_free_notify:  no      (see below)

media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().

swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.

--------------------------- file_operations -------------------------------
prototypes:
        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t);
        int (*readdir) (struct file *, void *, filldir_t);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *);
        int (*release) (struct inode *, struct file *);
        int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
        int (*aio_fsync) (struct kiocb *, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
        ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
                        loff_t *);
        ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
                        loff_t *);
        ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
                        void __user *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
                        unsigned long, unsigned long, unsigned long);
        int (*check_flags)(int);
        int (*flock) (struct file *, int, struct file_lock *);
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
                        size_t, unsigned int);
        ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **);
        long (*fallocate)(struct file *, int, loff_t, loff_t);

locking rules:
        All may block except for ->setlease.
        No VFS locks held on entry except for ->setlease.

->setlease has the file_list_lock held and must not sleep.

->llseek() locking has moved from llseek to the individual llseek
implementations.  If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something the userspace has to take care of.
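
A sketch of an open-coded ->llseek() that takes i_mutex to stabilize i_size and
any fs-private state (purely illustrative; if generic_file_llseek() is enough
for your fs, just use it):

        static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
        {
                struct inode *inode = file->f_mapping->host;
                loff_t ret = -EINVAL;

                mutex_lock(&inode->i_mutex);
                switch (whence) {
                case SEEK_END:
                        offset += i_size_read(inode);
                        break;
                case SEEK_CUR:
                        offset += file->f_pos;
                        break;
                }
                if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                        file->f_pos = offset;
                        ret = offset;
                }
                mutex_unlock(&inode->i_mutex);
                return ret;
        }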

->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about.  Return values > 0 will be
mapped to zero in the VFS layer.
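
A typical instance just delegates to fasync_helper(); in this sketch the
fasync_struct pointer lives in hypothetical per-file private data:

        static int myfs_fasync(int fd, struct file *filp, int on)
        {
                struct myfs_file_info *fi = filp->private_data; /* hypothetical */

                return fasync_helper(fd, filp, on, &fi->fasync_queue);
        }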

->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

--------------------------- dquot_operations -------------------------------
prototypes:
        int (*write_dquot) (struct dquot *);
        int (*acquire_dquot) (struct dquot *);
        int (*release_dquot) (struct dquot *);
        int (*mark_dirty) (struct dquot *);
        int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking with respect to the filesystem and call the generic quota
operations.

What the filesystem should expect from the generic quota functions:

                FS recursion    Held locks when called
write_dquot:    yes             dqonoff_sem or dqptr_sem
acquire_dquot:  yes             dqonoff_sem or dqptr_sem
release_dquot:  yes             dqonoff_sem or dqptr_sem
mark_dirty:     no              -
write_info:     yes             dqonoff_sem

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/dquot.c.
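
As an example of the "wrapping" mentioned above, a journalling filesystem
typically opens its own transaction around the generic helper.  A rough sketch,
where the handle_t type and the myfs_journal_* calls are placeholders for the
filesystem's journalling machinery:

        static int myfs_write_dquot(struct dquot *dquot)
        {
                handle_t *handle;                               /* hypothetical */
                int ret;

                handle = myfs_journal_start(dquot->dq_sb);      /* hypothetical */
                if (IS_ERR(handle))
                        return PTR_ERR(handle);
                ret = dquot_commit(dquot);      /* generic quota code does the I/O */
                myfs_journal_stop(handle);                      /* hypothetical */
                return ret;
        }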

--------------------------- vm_operations_struct -----------------------------
prototypes:
        void (*open)(struct vm_area_struct*);
        void (*close)(struct vm_area_struct*);
        int (*fault)(struct vm_area_struct*, struct vm_fault *);
        int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

locking rules:
                mmap_sem        PageLocked(page)
open:           yes
close:          yes
fault:          yes             can return with page locked
page_mkwrite:   yes             can return with page locked
access:         yes

        ->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
with the passed in "pgoff" in the vm_fault structure. If it is possible that
the page may be truncated and/or invalidated, then the filesystem must lock
the page, then ensure it is not already truncated (the page lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
locked. The VM will unlock the page.

        ->page_mkwrite() is called when a previously read-only pte is
about to become writeable. The filesystem again must ensure that there are
no truncate/invalidate races, and then return with the page locked. If
the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.
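
A sketch of a ->page_mkwrite() that closes the truncate race as described
(many filesystems pair this with .fault = filemap_fault; block allocation and
dirtying are omitted here):

        static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                struct page *page = vmf->page;
                struct inode *inode = vma->vm_file->f_mapping->host;

                lock_page(page);
                if (page->mapping != inode->i_mapping) {
                        /* raced with truncate - tell the VM to retry */
                        unlock_page(page);
                        return VM_FAULT_NOPAGE;
                }
                /* reserve blocks, mark the page dirty, etc. - may block */
                return VM_FAULT_LOCKED;         /* page stays locked */
        }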

        ->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace.  This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
                        Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)