/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

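/*
 * Return how many bytes of a transfer starting at @start fit inside the
 * page that contains @start, capped at @size.  The read/write loops
 * below use this to split every access into per-page chunks.
 */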
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

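/*
 * Give other tasks a chance to run during a long copy, and tell the
 * caller to stop iterating once a fatal signal is pending so the task
 * can die promptly instead of completing a huge transfer first.
 */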
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
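/*
 * Illustrative userspace sketch (not part of this driver): the file
 * offset of /dev/mem is the physical address to access, so a reader
 * typically does something like the following (example values only):
 *
 *      int fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *      lseek(fd, phys_addr, SEEK_SET);
 *      read(fd, buf, sizeof(buf));
 */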
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

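/*
 * Weak default: architectures may override this to veto a /dev/mem
 * mapping or to adjust *vma_prot for it.  A non-zero return means the
 * mapping may proceed.
 */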
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accesses above the top of memory the kernel knows about, or
         * through a file that was opened with O_DSYNC, are done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

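/*
 * mmap() of /dev/mem: validate the physical range and its permissions,
 * let the architecture adjust the page protections, then map the pages
 * linearly with remap_pfn_range().
 */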
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
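/*
 * Addresses below high_memory are copied straight out of the kernel's
 * direct mapping; anything above that is read through vread() via a
 * bounce page and must be a vmalloc or module address (otherwise -ENXIO).
 */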
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                        if (should_stop_iteration()) {
                                count = 0;
                                break;
                        }
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

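/*
 * Helper for write_kmem(): handles only the low-memory (direct-mapped)
 * part of a write; the vmalloc part is handled by the caller.
 */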
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

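/*
 * /dev/port: the file offset is an I/O port number (0-65535) and data
 * is transferred one byte at a time with inb()/outb().
 */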
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

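/*
 * /dev/null: reads see end-of-file immediately, while writes and
 * splices report success and simply discard the data.
 */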
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

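/*
 * Reads from /dev/zero fill the user buffer with zeros in page-sized
 * chunks, dropping out early if a signal becomes pending.
 */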
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

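/*
 * mmap() of /dev/zero: shared mappings get a shmem backing object so the
 * pages can be shared (e.g. across fork()); private mappings need no
 * backing at all, since the MM supplies anonymous zero pages on demand.
 */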
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

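/*
 * /dev/full: writes always fail with ENOSPC; reads are served by
 * read_iter_zero() via full_fops below.
 */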
static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file_inode(file)->i_mutex);
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
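                /* fall through */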
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if (IS_ERR_VALUE((unsigned long long)offset)) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

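/*
 * The array index is the minor number under MEM_MAJOR; gaps are minor
 * numbers this driver does not provide.  A zero mode means the device
 * node keeps the default (root-only) permissions, see mem_devnode().
 */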
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);