/*
 *	linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We use our own zeroed page rather than ZERO_PAGE() to avoid interfering
 * with its other users, such as /dev/zero.  The page is allocated lazily,
 * cached for the lifetime of the kernel, mapped into file holes by
 * xip_file_fault() and torn out of the page tables again by __xip_unmap()
 * once a hole is filled with a real block.
 */
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
	if (!__xip_sparse_page) {
		struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

		if (page)
			__xip_sparse_page = page;
	}
	return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * stuff.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
		    struct file_ra_state *_ra,
		    struct file *filp,
		    char __user *buf,
		    size_t len,
		    loff_t *ppos)
{
	struct inode *inode = mapping->host;
	pgoff_t index, end_index;
	unsigned long offset;
	loff_t isize, pos;
	size_t copied = 0, error = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	pos = *ppos;
	index = pos >> PAGE_CACHE_SHIFT;
	offset = pos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	do {
		unsigned long nr, left;
		void *xip_mem;
		unsigned long xip_pfn;
		int zero = 0;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;
		if (nr > len - copied)
			nr = len - copied;

		error = mapping->a_ops->get_xip_mem(mapping, index, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(error)) {
			if (error == -ENODATA) {
				/* sparse */
				zero = 1;
			} else
				goto out;
		}

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			/* address based flush */ ;

		/*
		 * Ok, we have the mem, so now we can copy it to user space...
		 *
		 * __copy_to_user() and __clear_user() return the number of
		 * bytes that could NOT be transferred ("left"); a non-zero
		 * return means we faulted on the user buffer, so we bail
		 * out with -EFAULT instead of advancing our position.
		 */
		if (!zero)
			left = __copy_to_user(buf + copied, xip_mem + offset, nr);
		else
			left = __clear_user(buf + copied, nr);

		if (left) {
			error = -EFAULT;
			goto out;
		}

		copied += (nr - left);
		offset += (nr - left);
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;
	} while (copied < len);

out:
	*ppos = pos + copied;
	if (filp)
		file_accessed(filp);

	return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
			    buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);
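
/*
 * Example (illustrative sketch, not part of this file): the minimal shape
 * of a get_xip_mem() implementation, for a filesystem whose backing store
 * is a single flat, directly addressable memory region.  The "examplefs_*"
 * names are hypothetical; a real implementation (e.g. ext2 with
 * CONFIG_EXT2_FS_XIP) resolves pgoff through its block mapping and
 * returns -ENODATA for a hole unless @create is set.
 */
#if 0
static void *examplefs_base_kaddr;		/* kernel address of the region */
static unsigned long examplefs_base_pfn;	/* pfn of its first page */

static int examplefs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
				 int create, void **kmem, unsigned long *pfn)
{
	/* no holes in this toy backing store, so @create is ignored */
	*kmem = examplefs_base_kaddr + ((loff_t)pgoff << PAGE_CACHE_SHIFT);
	*pfn = examplefs_base_pfn + pgoff;
	return 0;
}
#endif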

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
	    unsigned long pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	struct prio_tree_iter iter;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned count;
	int locked = 0;

	count = read_seqcount_begin(&xip_sparse_seq);

	page = __xip_sparse_page;
	if (!page)
		return;

retry:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		mm = vma->vm_mm;
		address = vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		BUG_ON(address < vma->vm_start || address >= vma->vm_end);
		pte = page_check_address(page, mm, address, &ptl, 1);
		if (pte) {
			/* Nuke the page table entry. */
			flush_cache_page(vma, address, pte_pfn(*pte));
			pteval = ptep_clear_flush_notify(vma, address, pte);
			page_remove_rmap(page);
			dec_mm_counter(mm, MM_FILEPAGES);
			BUG_ON(pte_dirty(pteval));
			pte_unmap_unlock(pte, ptl);
			page_cache_release(page);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (locked) {
		mutex_unlock(&xip_sparse_mutex);
	} else if (read_seqcount_retry(&xip_sparse_seq, count)) {
		mutex_lock(&xip_sparse_mutex);
		locked = 1;
		goto retry;
	}
}
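
/*
 * The retry logic in __xip_unmap() above pairs with the seqcount write
 * section in xip_file_fault(): if a fault inserted the sparse page into
 * some vma while we were walking the tree, read_seqcount_retry() notices
 * and the walk is redone holding xip_sparse_mutex, so no further
 * insertions can race with it.  The insert side, in miniature (a sketch
 * condensed from xip_file_fault(), not code that runs here):
 */
#if 0
	mutex_lock(&xip_sparse_mutex);
	write_seqcount_begin(&xip_sparse_seq);
	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			     xip_sparse_page());
	write_seqcount_end(&xip_sparse_seq);
	mutex_unlock(&xip_sparse_mutex);
#endif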

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in
 * place.  Three outcomes are possible: the block exists and its pfn is
 * mapped directly; the block is a hole in a writable shared mapping and
 * a new block is allocated; otherwise the hole is backed by the shared
 * zeroed page from xip_sparse_page().
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t size;
	void *xip_mem;
	unsigned long xip_pfn;
	struct page *page;
	int error;

	/* XXX: are VM_FAULT_ codes OK? */
again:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
						&xip_mem, &xip_pfn);
	if (likely(!error))
		goto found;
	if (error != -ENODATA)
		return VM_FAULT_OOM;

	/* sparse block */
	if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
	    (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
	    (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
		int err;

		/* maybe shared writable, allocate new block */
		mutex_lock(&xip_sparse_mutex);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
							&xip_mem, &xip_pfn);
		mutex_unlock(&xip_sparse_mutex);
		if (error)
			return VM_FAULT_SIGBUS;
		/* unmap sparse mappings at pgoff from all other vmas */
		__xip_unmap(mapping, vmf->pgoff);

found:
		err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
							xip_pfn);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		/*
		 * err == -EBUSY is fine, we've raced against another thread
		 * that faulted-in the same page
		 */
		if (err != -EBUSY)
			BUG_ON(err);
		return VM_FAULT_NOPAGE;
	} else {
		int err, ret = VM_FAULT_OOM;

		mutex_lock(&xip_sparse_mutex);
		write_seqcount_begin(&xip_sparse_seq);
		error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
							&xip_mem, &xip_pfn);
		if (unlikely(!error)) {
			write_seqcount_end(&xip_sparse_seq);
			mutex_unlock(&xip_sparse_mutex);
			goto again;
		}
		if (error != -ENODATA)
			goto out;
		/* not shared-writable (or fs is read-only): back the hole
		 * with the zeroed page from xip_sparse_page() */
		page = xip_sparse_page();
		if (!page)
			goto out;
		err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
							page);
		if (err == -ENOMEM)
			goto out;

		ret = VM_FAULT_NOPAGE;
out:
		write_seqcount_end(&xip_sparse_seq);
		mutex_unlock(&xip_sparse_mutex);

		return ret;
	}
}

static const struct vm_operations_struct xip_file_vm_ops = {
	.fault	= xip_file_fault,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

	file_accessed(file);
	vma->vm_ops = &xip_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
	return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
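
/*
 * Example (illustrative sketch, not part of this file): an XIP-capable
 * filesystem wires the xip_file_* helpers into its file_operations, much
 * as ext2 does when built with CONFIG_EXT2_FS_XIP.  The "examplefs" name
 * is hypothetical.
 */
#if 0
static const struct file_operations examplefs_xip_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= xip_file_read,
	.write		= xip_file_write,
	.mmap		= xip_file_mmap,
	.fsync		= generic_file_fsync,
};
#endif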

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
		  size_t count, loff_t pos, loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	struct inode	*inode = mapping->host;
	long		status = 0;
	size_t		bytes;
	ssize_t		written = 0;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	do {
		unsigned long index;
		unsigned long offset;
		size_t copied;
		void *xip_mem;
		unsigned long xip_pfn;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
		if (status == -ENODATA) {
			/* hole: allocate a new block */
			mutex_lock(&xip_sparse_mutex);
			status = a_ops->get_xip_mem(mapping, index, 1,
							&xip_mem, &xip_pfn);
			mutex_unlock(&xip_sparse_mutex);
			if (!status)
				/* unmap page at pgoff from all other vmas */
				__xip_unmap(mapping, index);
		}

		if (status)
			break;

		copied = bytes -
			__copy_from_user_nocache(xip_mem + offset, buf, bytes);

		if (likely(copied > 0)) {
			status = copied;

			if (status >= 0) {
				written += status;
				count -= status;
				pos += status;
				buf += status;
			}
		}
		if (unlikely(copied != bytes))
			if (status >= 0)
				status = -EFAULT;
		if (status < 0)
			break;
	} while (count);
	*ppos = pos;
	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 */
	if (pos > inode->i_size) {
		i_size_write(inode, pos);
		mark_inode_dirty(inode);
	}

	return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
	       loff_t *ppos)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count;
	loff_t pos;
	ssize_t ret;

	mutex_lock(&inode->i_mutex);

	if (!access_ok(VERIFY_READ, buf, len)) {
		ret = -EFAULT;
		goto out_up;
	}

	pos = *ppos;
	count = len;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
	if (ret)
		goto out_backing;
	if (count == 0)
		goto out_backing;

	ret = file_remove_suid(filp);
	if (ret)
		goto out_backing;

	ret = file_update_time(filp);
	if (ret)
		goto out_backing;

	ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
	current->backing_dev_info = NULL;
 out_up:
	mutex_unlock(&inode->i_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * Truncate a page used for execute in place.
 * The functionality is analogous to block_truncate_page(), but uses
 * get_xip_mem() to reach the data instead of going through the page
 * cache.
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize;
	unsigned length;
	void *xip_mem;
	unsigned long xip_pfn;
	int err;

	BUG_ON(!mapping->a_ops->get_xip_mem);

	blocksize = 1 << mapping->host->i_blkbits;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;

	err = mapping->a_ops->get_xip_mem(mapping, index, 0,
						&xip_mem, &xip_pfn);
	if (unlikely(err)) {
		if (err == -ENODATA)
			/* Hole? No need to truncate */
			return 0;
		else
			return err;
	}
	memset(xip_mem + offset, 0, length);
	return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);
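
/*
 * Example (illustrative sketch, not part of this file): a filesystem's
 * truncate path zeroes the tail of the last partial block with
 * xip_truncate_page() before shrinking i_size, where a page-cache
 * filesystem would use block_truncate_page().  ext2 does the equivalent
 * when built with CONFIG_EXT2_FS_XIP; "examplefs_setsize" is hypothetical.
 */
#if 0
static int examplefs_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	/* zero the partial block beyond the new EOF */
	error = xip_truncate_page(inode->i_mapping, newsize);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	/* ... release the now-unused blocks ... */
	return 0;
}
#endif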