/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

/*
 * Fault handler: look up and return the vmalloc-ed framebuffer page
 * that backs the faulting offset.
 */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        unsigned long offset;
        struct page *page;
        struct fb_info *info = vma->vm_private_data;
        /* info->screen_base is virtual memory */
        void *screen_base = (void __force *) info->screen_base;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= info->fix.smem_len)
                return VM_FAULT_SIGBUS;

        page = vmalloc_to_page(screen_base + offset);
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);

        /*
         * Tie the page to the file's mapping so that rmap (and thus
         * page_mkclean() in the deferred work) can find all of its ptes.
         */
        if (vma->vm_file)
                page->mapping = vma->vm_file->f_mapping;
        else
                printk(KERN_ERR "fb_defio: no mapping available\n");

        BUG_ON(!page->mapping);
        page->index = vmf->pgoff;

        vmf->page = page;
        return 0;
}
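
/*
 * Context for the fault handler above: a deferred-IO driver allocates
 * its framebuffer with vmalloc() so that vmalloc_to_page() can resolve
 * each offset. A minimal allocation sketch, using hypothetical names
 * (videomemory, videomemorysize):
 *
 *      videomemory = vmalloc(videomemorysize);
 *      if (!videomemory)
 *              return -ENOMEM;
 *      memset(videomemory, 0, videomemorysize);
 *      info->screen_base = (char __force __iomem *) videomemory;
 *      info->fix.smem_len = videomemorysize;
 */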

int fb_deferred_io_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        struct fb_info *info = file->private_data;

        /* Kill off the delayed work */
        cancel_rearming_delayed_work(&info->deferred_work);

        /*
         * Run it immediately. schedule_delayed_work() returns whether
         * the work was newly queued, not an errno, so its return value
         * must not be handed back to the fsync caller.
         */
        schedule_delayed_work(&info->deferred_work, 0);
        return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
                                  struct page *page)
{
        struct fb_info *info = vma->vm_private_data;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *cur;

        /*
         * This is the callback we get when userspace first tries to
         * write to a page. We schedule delayed work; that work will
         * eventually mkclean the touched pages and execute the deferred
         * framebuffer IO. Then, if userspace touches a page again, we
         * repeat the same scheme.
         */

        /* protect against the workqueue changing the page list */
        mutex_lock(&fbdefio->lock);

        /*
         * We walk the pagelist before adding so that insertion keeps
         * the list sorted by page index.
         */
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                /*
                 * This check catches the case where a second process
                 * starts writing to the same page through a new pte:
                 * that access can trigger mkwrite even while the
                 * original process's pte is already marked writable.
                 */
                if (unlikely(cur == page))
                        goto page_already_added;
                else if (cur->index > page->index)
                        break;
        }

        list_add_tail(&page->lru, &cur->lru);

page_already_added:
        mutex_unlock(&fbdefio->lock);

        /* come back after delay to process the deferred IO */
        schedule_delayed_work(&info->deferred_work, fbdefio->delay);
        return 0;
}

static struct vm_operations_struct fb_deferred_io_vm_ops = {
        .fault          = fb_deferred_io_fault,
        .page_mkwrite   = fb_deferred_io_mkwrite,
};

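/*
 * Minimal set_page_dirty() for the framebuffer pages: these are plain
 * vmalloc-ed pages with no writeback-capable backing store, so (as far
 * as this code is concerned) only the dirty bit itself matters and the
 * default address_space behaviour is deliberately bypassed.
 */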
static int fb_deferred_io_set_page_dirty(struct page *page)
{
        if (!PageDirty(page))
                SetPageDirty(page);
        return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
        .set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
        vma->vm_ops = &fb_deferred_io_vm_ops;
        vma->vm_flags |= (VM_IO | VM_RESERVED | VM_DONTEXPAND);
        vma->vm_private_data = info;
        return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
        struct fb_info *info = container_of(work, struct fb_info,
                                                deferred_work.work);
        struct list_head *node, *next;
        struct page *cur;
        struct fb_deferred_io *fbdefio = info->fbdefio;

        /*
         * First mkclean the pages: this write-protects every pte that
         * maps them, so the next userspace write faults back into
         * fb_deferred_io_mkwrite(). Then hand the whole list to the
         * driver in a single deferred IO pass.
         */
        mutex_lock(&fbdefio->lock);
        list_for_each_entry(cur, &fbdefio->pagelist, lru) {
                lock_page(cur);
                page_mkclean(cur);
                unlock_page(cur);
        }

        /* driver's callback with pagelist */
        fbdefio->deferred_io(info, &fbdefio->pagelist);

        /* clear the list */
        list_for_each_safe(node, next, &fbdefio->pagelist) {
                list_del(node);
        }
        mutex_unlock(&fbdefio->lock);
}
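
/*
 * What a driver's deferred_io callback typically does: a minimal
 * sketch, assuming a hypothetical foo_write_page() that pushes one
 * page's worth of data to the device. The list arrives sorted by
 * page->index thanks to the insertion in fb_deferred_io_mkwrite().
 *
 *      static void foo_deferred_io(struct fb_info *info,
 *                                  struct list_head *pagelist)
 *      {
 *              struct page *page;
 *
 *              list_for_each_entry(page, pagelist, lru)
 *                      foo_write_page(info, page->index << PAGE_SHIFT);
 *      }
 */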

void fb_deferred_io_init(struct fb_info *info)
{
        struct fb_deferred_io *fbdefio = info->fbdefio;

        BUG_ON(!fbdefio);
        mutex_init(&fbdefio->lock);
        info->fbops->fb_mmap = fb_deferred_io_mmap;
        INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
        INIT_LIST_HEAD(&fbdefio->pagelist);
        if (fbdefio->delay == 0) /* set a default of 1 second */
                fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
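
/*
 * How a driver wires this up: a minimal sketch, reusing the
 * hypothetical foo_* names from the sketches above. A .delay of HZ
 * rate-limits flushes to roughly once per second.
 *
 *      static struct fb_deferred_io foo_defio = {
 *              .delay          = HZ,
 *              .deferred_io    = foo_deferred_io,
 *      };
 *
 * Then, in the probe path:
 *
 *      info->fbdefio = &foo_defio;
 *      fb_deferred_io_init(info);
 *      retval = register_framebuffer(info);
 */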

void fb_deferred_io_open(struct fb_info *info,
                         struct inode *inode,
                         struct file *file)
{
        file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
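
/*
 * Note on the caller: when info->fbdefio is set, the fbdev core's
 * fb_open() (in fbmem.c, under CONFIG_FB_DEFERRED_IO) is expected to
 * call fb_deferred_io_open() so the char device mapping picks up the
 * address_space_operations above.
 */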

void fb_deferred_io_cleanup(struct fb_info *info)
{
        void *screen_base = (void __force *) info->screen_base;
        struct fb_deferred_io *fbdefio = info->fbdefio;
        struct page *page;
        int i;

        BUG_ON(!fbdefio);
        cancel_delayed_work(&info->deferred_work);
        flush_scheduled_work();

        /* clear out the mapping that we set up in the fault handler */
        for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
                page = vmalloc_to_page(screen_base + i);
                page->mapping = NULL;
        }
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
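
/*
 * Teardown order in a driver's remove path, again as a sketch with the
 * hypothetical names from above: unregister the framebuffer first so no
 * new mmaps or writes arrive, then clear the page mappings here, then
 * free the vmalloc-ed memory.
 *
 *      unregister_framebuffer(info);
 *      fb_deferred_io_cleanup(info);
 *      vfree(videomemory);
 *      framebuffer_release(info);
 */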

MODULE_LICENSE("GPL");