linux/drivers/char/mem.c
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

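/*
 * Number of bytes from @start to the end of its page, capped at @size.
 * Callers use this to split transfers on page boundaries.
 */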
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

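/*
 * Default range checks for architectures that don't provide their own:
 * reads and writes must stay below high_memory, and any pfn may be
 * mmapped.
 */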
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

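/*
 * With CONFIG_STRICT_DEVMEM, the architecture's devmem_is_allowed() is
 * consulted for every page in the range; without it, everything goes.
 */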
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;

                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                remaining = copy_to_user(buf, ptr, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

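/*
 * This function writes to the *physical* memory; the mirror image of
 * read_mem() above.
 */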
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

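/*
 * Weak hook so an architecture can veto (return 0) or adjust the page
 * protections used when user space mmap()s physical memory.
 */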
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about, or
         * through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

 261
 262static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 263                                     unsigned long size, pgprot_t vma_prot)
 264{
 265#ifdef pgprot_noncached
 266        phys_addr_t offset = pfn << PAGE_SHIFT;
 267
 268        if (uncached_access(file, offset))
 269                return pgprot_noncached(vma_prot);
 270#endif
 271        return vma_prot;
 272}
 273#endif
 274
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

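/*
 * With CONFIG_HAVE_IOREMAP_PROT, generic_access_phys() gives ptrace()
 * users (e.g. gdb) a way to peek and poke at these VM_IO mappings.
 */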
static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

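/*
 * mmap of /dev/mem: validate the physical range, then map it straight
 * through with remap_pfn_range() using the (possibly uncached)
 * protection chosen above.
 */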
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

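/*
 * mmap of /dev/kmem: the offset is a kernel virtual address; translate
 * it to a page frame number and fall through to mmap_mem().
 */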
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

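/*
 * Write to the low, directly mapped kernel memory below high_memory;
 * the kernel-virtual counterpart of write_mem().
 */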
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        /* GNU "?:" extension: the byte total if any were written, else err */
        return virtr + wrote ? : err;
}

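/*
 * /dev/port: byte-wise access to x86-style I/O port space.  The file
 * offset is the port number; reads use inb(), writes use outb().
 */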
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

 598
 599static ssize_t read_null(struct file *file, char __user *buf,
 600                         size_t count, loff_t *ppos)
 601{
 602        return 0;
 603}
 604
 605static ssize_t write_null(struct file *file, const char __user *buf,
 606                          size_t count, loff_t *ppos)
 607{
 608        return count;
 609}
 610
 611static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
 612{
 613        return 0;
 614}
 615
 616static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
 617{
 618        size_t count = iov_iter_count(from);
 619        iov_iter_advance(from, count);
 620        return count;
 621}
 622
 623static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
 624                        struct splice_desc *sd)
 625{
 626        return sd->len;
 627}
 628
 629static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
 630                                 loff_t *ppos, size_t len, unsigned int flags)
 631{
 632        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
 633}
 634
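/*
 * /dev/zero reads: fill the user buffer a page at a time so that large
 * reads stay preemptible and remain interruptible by signals.
 */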
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

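/*
 * mmap of /dev/zero: shared mappings get a shmem object backing them;
 * private mappings just fault in anonymous zeroed pages.
 */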
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

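/*
 * /dev/full: reads return zeroes (read_iter_zero); writes always fail
 * as if the device were out of space.
 */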
static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                /* fall through */
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

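/*
 * Raw port and memory access is restricted to tasks with CAP_SYS_RAWIO;
 * open_mem and open_kmem alias this below.
 */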
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

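/*
 * The array index is the minor number under MEM_MAJOR (1), so e.g.
 * /dev/null corresponds to "mknod /dev/null c 1 3".  Gaps in the array
 * ([6], [10]) are minors this file no longer provides.
 */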
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

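/*
 * Every minor is opened through memory_open(): look the minor up in
 * devlist[] and hand the file over to that device's own fops.
 */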
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

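/*
 * Register the major, create the "mem" class and a device node for each
 * populated devlist[] entry, then let the TTY layer initialise itself.
 */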
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);