// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/security.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVMEM_MINOR    1
#define DEVPORT_MINOR   4

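/*
 * Return the number of bytes from @start to the end of the page that
 * contains it, clamped to @size; callers use this to split an I/O
 * request into per-page chunks.
 */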
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

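/*
 * Default range checks for architectures that do not provide their
 * own: reads and writes must stay below high_memory, while any pfn
 * may be mmapped.
 */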
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

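/*
 * With CONFIG_STRICT_DEVMEM, the architecture's devmem_is_allowed()
 * decides page by page whether /dev/mem may touch a pfn (the
 * read/write paths below treat a return of 2 as "permitted, but
 * reads back zeros").  Without it, every page is allowed.
 */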
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

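/*
 * /dev/mem transfers are split into per-page chunks; between chunks
 * we yield the CPU if a reschedule is due and stop early when a
 * fatal signal is pending.
 */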
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = copy_from_kernel_nofault(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}

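/*
 * Write to *physical* memory: the mirror image of read_mem(), again
 * honouring page_is_allowed() and the ia64 uncached-alias rule.
 * Writes to restricted pages (allowed == 2) are silently discarded.
 */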
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

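/*
 * Weak hook that lets an architecture veto or adjust the page
 * protection used for a /dev/mem mapping; the default allows the
 * mapping unchanged.
 */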
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top the kernel knows about, or
         * through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* Does it even fit in phys_addr_t? */
        if (offset >> PAGE_SHIFT != vma->vm_pgoff)
                return -EINVAL;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

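/*
 * /dev/port: byte-at-a-time access to the x86-style I/O port space,
 * with *ppos used as the port number and the 64K port limit enforced.
 */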
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

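/*
 * /dev/null: reads return EOF, while writes and splices succeed by
 * discarding the data.
 */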
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t cleared = 0;

        while (count) {
                size_t chunk = min_t(size_t, count, PAGE_SIZE);
                size_t left;

                left = clear_user(buf + cleared, chunk);
                if (unlikely(left)) {
                        cleared += (chunk - left);
                        if (!cleared)
                                return -EFAULT;
                        break;
                }
                cleared += chunk;
                count -= chunk;

                if (signal_pending(current))
                        break;
                cond_resched();
        }

        return cleared;
}

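/*
 * mmap of /dev/zero: private mappings become ordinary anonymous
 * memory, while shared mappings are backed by a shmem object so the
 * pages stay coherent between the sharers.
 */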
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        vma_set_anonymous(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
                fallthrough;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        int rc;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        rc = security_locked_down(LOCKDOWN_DEV_MEM);
        if (rc)
                return rc;

        if (iminor(inode) != DEVMEM_MINOR)
                return 0;

        /*
         * Use a unified address space to have a single point to manage
         * revocations when drivers want to take over a /dev/mem mapped
         * range.
         */
        filp->f_mapping = iomem_get_mapping();

        return 0;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .read           = read_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [DEVMEM_MINOR] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
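
/*
 * All of these minors sit under the traditional memory-device major,
 * MEM_MAJOR (1); a static /dev tree would carry, for example,
 * "mknod /dev/null c 1 3" and "mknod /dev/zero c 1 5".  Typically,
 * devtmpfs/udev create the nodes from the devices registered in
 * chr_dev_init() below.
 */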
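/*
 * Top-level open() for this major: look up the minor in devlist[],
 * splice in the per-device file_operations, then call that device's
 * own open() method, if any.
 */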
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;
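/*
 * Register the "mem" character major and create a device for each
 * populated devlist[] slot (skipping /dev/port on architectures
 * without port I/O), then hand off to tty_init().
 */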
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);