linux/fs/file_table.c
<<
>>
Prefs
   1/*
   2 *  linux/fs/file_table.c
   3 *
   4 *  Copyright (C) 1991, 1992  Linus Torvalds
   5 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
   6 */
   7
   8#include <linux/string.h>
   9#include <linux/slab.h>
  10#include <linux/file.h>
  11#include <linux/fdtable.h>
  12#include <linux/init.h>
  13#include <linux/module.h>
  14#include <linux/fs.h>
  15#include <linux/security.h>
  16#include <linux/eventpoll.h>
  17#include <linux/rcupdate.h>
  18#include <linux/mount.h>
  19#include <linux/capability.h>
  20#include <linux/cdev.h>
  21#include <linux/fsnotify.h>
  22#include <linux/sysctl.h>
  23#include <linux/percpu_counter.h>
  24
  25#include <asm/atomic.h>
  26
/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE	/* boot default; recomputed in files_init() */
};

/* public. Not pretty!  Global spinlock for the per-superblock file
 * lists (presumably taken via the file_list_lock()/file_list_unlock()
 * wrappers used below — confirm in fdtable.h). */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* Approximate system-wide count of allocated 'struct file's; a percpu
 * counter so the open/close fast paths avoid a shared atomic. */
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
  36
  37static inline void file_free_rcu(struct rcu_head *head)
  38{
  39        struct file *f =  container_of(head, struct file, f_u.fu_rcuhead);
  40        kmem_cache_free(filp_cachep, f);
  41}
  42
/*
 * Release @f: drop it from the global count and defer the actual
 * kmem_cache_free() past an RCU grace period, so lock-free readers
 * (fget() and friends, under rcu_read_lock()) never see the memory
 * recycled underneath them.
 */
static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	/* presumably a debug sanity check of the mount-write state set
	 * up by file_take_write() — confirm in the fs headers */
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}
  49
/*
 * Return the total number of open files in the system.
 * (Approximate: percpu_counter_read_positive() reads the possibly
 * stale central value without folding in remote per-cpu deltas, but
 * never returns a negative number.)
 */
static int get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}
  57
/*
 * Return the maximum number of open files in the system
 * (the tunable files_stat.max_files limit, see files_init()).
 */
int get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
  66
/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
/*
 * Snapshot the percpu counter into files_stat.nr_files so the generic
 * integer handler reports a current value; reads and writes then fall
 * through to proc_dointvec() unchanged.
 */
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
#else
/* Without sysctl+procfs there is nothing to report. */
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif
  84
/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	struct task_struct *tsk;
	static int old_max;	/* highest count we have already warned about */
	struct file * f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	/* Counted before security_file_alloc() so that file_free()
	 * (which decrements) is the right undo on the fail_sec path. */
	percpu_counter_inc(&nr_files);
	if (security_file_alloc(f))
		goto fail_sec;

	tsk = current;
	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);	/* caller owns the initial ref */
	rwlock_init(&f->f_owner.lock);
	f->f_uid = tsk->fsuid;
	f->f_gid = tsk->fsgid;
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that, but only once per new maximum */
	if (get_nr_files() > old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n",
					get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

EXPORT_SYMBOL(get_empty_filp);
 147
 148/**
 149 * alloc_file - allocate and initialize a 'struct file'
 150 * @mnt: the vfsmount on which the file will reside
 151 * @dentry: the dentry representing the new file
 152 * @mode: the mode with which the new file will be opened
 153 * @fop: the 'struct file_operations' for the new file
 154 *
 155 * Use this instead of get_empty_filp() to get a new
 156 * 'struct file'.  Do so because of the same initialization
 157 * pitfalls reasons listed for init_file().  This is a
 158 * preferred interface to using init_file().
 159 *
 160 * If all the callers of init_file() are eliminated, its
 161 * code should be moved into this function.
 162 */
 163struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
 164                fmode_t mode, const struct file_operations *fop)
 165{
 166        struct file *file;
 167        struct path;
 168
 169        file = get_empty_filp();
 170        if (!file)
 171                return NULL;
 172
 173        init_file(file, mnt, dentry, mode, fop);
 174        return file;
 175}
 176EXPORT_SYMBOL(alloc_file);
 177
/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to initialized
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly.  Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Returns 0, or the (WARNed-about) mnt_want_write() error when the
 * file is opened for write on a mount that refuses write access.
 *
 * Note: This is a crappy interface.  It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier.  All users
 * of this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
	   fmode_t mode, const struct file_operations *fop)
{
	int error = 0;
	file->f_path.dentry = dentry;
	file->f_path.mnt = mntget(mnt);	/* the file pins the mount */
	file->f_mapping = dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
		/* mark before mnt_want_write() so __fput()-time checks
		 * can pair the two — undone in drop_file_write_access() */
		file_take_write(file);
		error = mnt_want_write(mnt);
		WARN_ON(error);
	}
	return error;
}
EXPORT_SYMBOL(init_file);
 219
 220void fput(struct file *file)
 221{
 222        if (atomic_long_dec_and_test(&file->f_count))
 223                __fput(file);
 224}
 225
 226EXPORT_SYMBOL(fput);
 227
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	/* drop the inode-level write count (pairs with get_write_access()
	 * at open time — NOTE(review): confirm against the open path) */
	put_write_access(inode);

	/* special files never took mnt write access in init_file() */
	if (special_file(inode->i_mode))
		return;
	/* NOTE(review): presumably guards against releasing the mount
	 * write reference twice — confirm file_check_writeable() */
	if (file_check_writeable(file) != 0)
		return;
	mnt_drop_write(mnt);		/* undo mnt_want_write() */
	file_release_write(file);	/* undo file_take_write() */
}
EXPORT_SYMBOL_GPL(drop_file_write_access);
 252
/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 */
void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	/* tear down async-notification state before ->release() */
	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
		cdev_put(inode->i_cdev);
	/* presumably drops the module reference pinned via f_op —
	 * NOTE(review): confirm fops_put() semantics */
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_kill(file);	/* unlink from the superblock's file list */
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	/* clear the path before the (RCU-deferred) free */
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	/* dentry and mount references are dropped last: the inode must
	 * remain valid throughout the teardown above */
	dput(dentry);
	mntput(mnt);
}
 292
 293struct file *fget(unsigned int fd)
 294{
 295        struct file *file;
 296        struct files_struct *files = current->files;
 297
 298        rcu_read_lock();
 299        file = fcheck_files(files, fd);
 300        if (file) {
 301                if (!atomic_long_inc_not_zero(&file->f_count)) {
 302                        /* File object ref couldn't be taken */
 303                        rcu_read_unlock();
 304                        return NULL;
 305                }
 306        }
 307        rcu_read_unlock();
 308
 309        return file;
 310}
 311
 312EXPORT_SYMBOL(fget);
 313
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (likely((atomic_read(&files->count) == 1))) {
		/* Table is private to this task: the task's own reference
		 * keeps the file alive, so no per-call refcount needed. */
		file = fcheck_files(files, fd);
	} else {
		/* Shared table: behave like fget() and take a real
		 * reference under RCU protection. */
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;	/* caller must fput_light() */
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}
 344
 345
/*
 * Drop a reference to a file that was never fully set up; the last
 * reference frees it without the dentry/mount/inode teardown that
 * __fput() performs (this file holds no such references).
 */
void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_kill(file);	/* unlink from a file list, if on one */
		file_free(file);	/* RCU-deferred return to the slab */
	}
}
 354
 355void file_move(struct file *file, struct list_head *list)
 356{
 357        if (!list)
 358                return;
 359        file_list_lock();
 360        list_move(&file->f_u.fu_list, list);
 361        file_list_unlock();
 362}
 363
 364void file_kill(struct file *file)
 365{
 366        if (!list_empty(&file->f_u.fu_list)) {
 367                file_list_lock();
 368                list_del_init(&file->f_u.fu_list);
 369                file_list_unlock();
 370        }
 371}
 372
 373int fs_may_remount_ro(struct super_block *sb)
 374{
 375        struct file *file;
 376
 377        /* Check that no files are currently opened for writing. */
 378        file_list_lock();
 379        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
 380                struct inode *inode = file->f_path.dentry->d_inode;
 381
 382                /* File with pending delete? */
 383                if (inode->i_nlink == 0)
 384                        goto too_bad;
 385
 386                /* Writeable file? */
 387                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
 388                        goto too_bad;
 389        }
 390        file_list_unlock();
 391        return 1; /* Tis' cool bro. */
 392too_bad:
 393        file_list_unlock();
 394        return 0;
 395}
 396
 397void __init files_init(unsigned long mempages)
 398{ 
 399        int n; 
 400        /* One file with associated inode and dcache is very roughly 1K. 
 401         * Per default don't use more than 10% of our memory for files. 
 402         */ 
 403
 404        n = (mempages * (PAGE_SIZE / 1024)) / 10;
 405        files_stat.max_files = n; 
 406        if (files_stat.max_files < NR_FILE)
 407                files_stat.max_files = NR_FILE;
 408        files_defer_init();
 409        percpu_counter_init(&nr_files, 0);
 410} 
 411
lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.