
TOMOYO Linux Cross Reference
Linux/fs/proc/vmcore.c


  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  *      fs/proc/vmcore.c Interface for accessing the crash
  4  *                               dump from the system's previous life.
  5  *      Heavily borrowed from fs/proc/kcore.c
  6  *      Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
  7  *      Copyright (C) IBM Corporation, 2004. All rights reserved
  8  *
  9  */
 10 
 11 #include <linux/mm.h>
 12 #include <linux/kcore.h>
 13 #include <linux/user.h>
 14 #include <linux/elf.h>
 15 #include <linux/elfcore.h>
 16 #include <linux/export.h>
 17 #include <linux/slab.h>
 18 #include <linux/highmem.h>
 19 #include <linux/printk.h>
 20 #include <linux/memblock.h>
 21 #include <linux/init.h>
 22 #include <linux/crash_dump.h>
 23 #include <linux/list.h>
 24 #include <linux/mutex.h>
 25 #include <linux/vmalloc.h>
 26 #include <linux/pagemap.h>
 27 #include <linux/uaccess.h>
 28 #include <linux/mem_encrypt.h>
 29 #include <asm/pgtable.h>
 30 #include <asm/io.h>
 31 #include "internal.h"
 32 
 33 /* List representing chunks of contiguous memory areas and their offsets in
 34  * vmcore file.
 35  */
 36 static LIST_HEAD(vmcore_list);
 37 
 38 /* Stores the pointer to the buffer containing kernel elf core headers. */
 39 static char *elfcorebuf;
 40 static size_t elfcorebuf_sz;
 41 static size_t elfcorebuf_sz_orig;
 42 
 43 static char *elfnotes_buf;
 44 static size_t elfnotes_sz;
 45 /* Size of all notes minus the device dump notes */
 46 static size_t elfnotes_orig_sz;
 47 
 48 /* Total size of vmcore file. */
 49 static u64 vmcore_size;
 50 
 51 static struct proc_dir_entry *proc_vmcore;
 52 
 53 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 54 /* Device Dump list and mutex to synchronize access to list */
 55 static LIST_HEAD(vmcoredd_list);
 56 static DEFINE_MUTEX(vmcoredd_mutex);
 57 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
 58 
 59 /* Device Dump Size */
 60 static size_t vmcoredd_orig_sz;
 61 
 62 /*
 63  * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 64  * The called function has to take care of module refcounting.
 65  */
 66 static int (*oldmem_pfn_is_ram)(unsigned long pfn);
 67 
 68 int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
 69 {
 70         if (oldmem_pfn_is_ram)
 71                 return -EBUSY;
 72         oldmem_pfn_is_ram = fn;
 73         return 0;
 74 }
 75 EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
 76 
 77 void unregister_oldmem_pfn_is_ram(void)
 78 {
 79         oldmem_pfn_is_ram = NULL;
 80         wmb();
 81 }
 82 EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
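
/*
 * Editor's illustration (not part of vmcore.c): a hedged sketch of how a
 * module in the crash (2nd) kernel could register the hook above so that
 * reads of pages it knows are not backed (e.g. ballooned pages) return
 * zeros instead of querying the hypervisor.  my_backend_pfn_present() is a
 * hypothetical placeholder for the backend's own lookup; the known in-tree
 * user of this hook is the Xen PV code.
 */
#include <linux/module.h>
#include <linux/crash_dump.h>

static bool my_backend_pfn_present(unsigned long pfn)
{
	return true;	/* placeholder: consult the real backend here */
}

static int my_oldmem_pfn_is_ram(unsigned long pfn)
{
	/* > 0: treat as RAM; 0: not RAM, vmcore substitutes zeros */
	return my_backend_pfn_present(pfn) ? 1 : 0;
}

static int __init my_init(void)
{
	return register_oldmem_pfn_is_ram(my_oldmem_pfn_is_ram);
}
module_init(my_init);

static void __exit my_exit(void)
{
	unregister_oldmem_pfn_is_ram();
}
module_exit(my_exit);

MODULE_LICENSE("GPL");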
 83 
 84 static int pfn_is_ram(unsigned long pfn)
 85 {
 86         int (*fn)(unsigned long pfn);
 87         /* pfn is ram unless fn() checks pagetype */
 88         int ret = 1;
 89 
 90         /*
 91          * Ask hypervisor if the pfn is really ram.
 92          * A ballooned page contains no data and reading from such a page
 93          * will cause high load in the hypervisor.
 94          */
 95         fn = oldmem_pfn_is_ram;
 96         if (fn)
 97                 ret = fn(pfn);
 98 
 99         return ret;
100 }
101 
102 /* Reads a page from the oldmem device from given offset. */
103 static ssize_t read_from_oldmem(char *buf, size_t count,
104                                 u64 *ppos, int userbuf,
105                                 bool encrypted)
106 {
107         unsigned long pfn, offset;
108         size_t nr_bytes;
109         ssize_t read = 0, tmp;
110 
111         if (!count)
112                 return 0;
113 
114         offset = (unsigned long)(*ppos % PAGE_SIZE);
115         pfn = (unsigned long)(*ppos / PAGE_SIZE);
116 
117         do {
118                 if (count > (PAGE_SIZE - offset))
119                         nr_bytes = PAGE_SIZE - offset;
120                 else
121                         nr_bytes = count;
122 
123                 /* If pfn is not ram, return zeros for sparse dump files */
124                 if (pfn_is_ram(pfn) == 0)
125                         memset(buf, 0, nr_bytes);
126                 else {
127                         if (encrypted)
128                                 tmp = copy_oldmem_page_encrypted(pfn, buf,
129                                                                  nr_bytes,
130                                                                  offset,
131                                                                  userbuf);
132                         else
133                                 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
134                                                        offset, userbuf);
135 
136                         if (tmp < 0)
137                                 return tmp;
138                 }
139                 *ppos += nr_bytes;
140                 count -= nr_bytes;
141                 buf += nr_bytes;
142                 read += nr_bytes;
143                 ++pfn;
144                 offset = 0;
145         } while (count);
146 
147         return read;
148 }
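
/*
 * Editor's note (not part of vmcore.c): a worked example of the loop above,
 * assuming PAGE_SIZE == 4096.  A read of count = 5000 bytes at *ppos = 0x1ffc
 * starts at pfn = 1, offset = 0xffc, and is split at page boundaries:
 *
 *   iteration 1: nr_bytes = 4096 - 0xffc = 4     (tail of pfn 1)
 *   iteration 2: nr_bytes = 4096                 (all of pfn 2)
 *   iteration 3: nr_bytes = 900                  (head of pfn 3)
 *
 * Each chunk is either copied with copy_oldmem_page()/..._encrypted() or
 * zero-filled when pfn_is_ram() reports a non-RAM page, and the function
 * returns the total of 5000 bytes.
 */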
149 
150 /*
151  * Architectures may override this function to allocate ELF header in 2nd kernel
152  */
153 int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
154 {
155         return 0;
156 }
157 
158 /*
159  * Architectures may override this function to free header
160  */
161 void __weak elfcorehdr_free(unsigned long long addr)
162 {}
163 
164 /*
165  * Architectures may override this function to read from ELF header
166  */
167 ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
168 {
169         return read_from_oldmem(buf, count, ppos, 0, false);
170 }
171 
172 /*
173  * Architectures may override this function to read from notes sections
174  */
175 ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
176 {
177         return read_from_oldmem(buf, count, ppos, 0, sme_active());
178 }
179 
180 /*
181  * Architectures may override this function to map oldmem
182  */
183 int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
184                                   unsigned long from, unsigned long pfn,
185                                   unsigned long size, pgprot_t prot)
186 {
187         prot = pgprot_encrypted(prot);
188         return remap_pfn_range(vma, from, pfn, size, prot);
189 }
190 
191 /*
192  * Architectures which support memory encryption override this.
193  */
194 ssize_t __weak
195 copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
196                            unsigned long offset, int userbuf)
197 {
198         return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
199 }
200 
201 /*
202  * Copy to either kernel or user space
203  */
204 static int copy_to(void *target, void *src, size_t size, int userbuf)
205 {
206         if (userbuf) {
207                 if (copy_to_user((char __user *) target, src, size))
208                         return -EFAULT;
209         } else {
210                 memcpy(target, src, size);
211         }
212         return 0;
213 }
214 
215 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
216 static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
217 {
218         struct vmcoredd_node *dump;
219         u64 offset = 0;
220         int ret = 0;
221         size_t tsz;
222         char *buf;
223 
224         mutex_lock(&vmcoredd_mutex);
225         list_for_each_entry(dump, &vmcoredd_list, list) {
226                 if (start < offset + dump->size) {
227                         tsz = min(offset + (u64)dump->size - start, (u64)size);
228                         buf = dump->buf + start - offset;
229                         if (copy_to(dst, buf, tsz, userbuf)) {
230                                 ret = -EFAULT;
231                                 goto out_unlock;
232                         }
233 
234                         size -= tsz;
235                         start += tsz;
236                         dst += tsz;
237 
238                         /* Leave now if buffer filled already */
239                         if (!size)
240                                 goto out_unlock;
241                 }
242                 offset += dump->size;
243         }
244 
245 out_unlock:
246         mutex_unlock(&vmcoredd_mutex);
247         return ret;
248 }
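
/*
 * Editor's note (not part of vmcore.c): the list walk above treats all
 * device dumps as one contiguous region.  Hypothetical example: with two
 * dumps of sizes 0x3000 and 0x2000 on vmcoredd_list, a request for
 * start = 0x3800, size = 0x1000 skips the first dump (offset becomes
 * 0x3000), then copies tsz = min(0x5000 - 0x3800, 0x1000) = 0x1000 bytes
 * from the second dump's buffer starting at offset 0x800 into it.
 */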
249 
250 #ifdef CONFIG_MMU
251 static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
252                                u64 start, size_t size)
253 {
254         struct vmcoredd_node *dump;
255         u64 offset = 0;
256         int ret = 0;
257         size_t tsz;
258         char *buf;
259 
260         mutex_lock(&vmcoredd_mutex);
261         list_for_each_entry(dump, &vmcoredd_list, list) {
262                 if (start < offset + dump->size) {
263                         tsz = min(offset + (u64)dump->size - start, (u64)size);
264                         buf = dump->buf + start - offset;
265                         if (remap_vmalloc_range_partial(vma, dst, buf, tsz)) {
266                                 ret = -EFAULT;
267                                 goto out_unlock;
268                         }
269 
270                         size -= tsz;
271                         start += tsz;
272                         dst += tsz;
273 
274                         /* Leave now if buffer filled already */
275                         if (!size)
276                                 goto out_unlock;
277                 }
278                 offset += dump->size;
279         }
280 
281 out_unlock:
282         mutex_unlock(&vmcoredd_mutex);
283         return ret;
284 }
285 #endif /* CONFIG_MMU */
286 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
287 
 288 /* Read from the ELF header and then the crash dump. On error, a negative value
 289  * is returned; otherwise the number of bytes read is returned.
 290  */
291 static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
292                              int userbuf)
293 {
294         ssize_t acc = 0, tmp;
295         size_t tsz;
296         u64 start;
297         struct vmcore *m = NULL;
298 
299         if (buflen == 0 || *fpos >= vmcore_size)
300                 return 0;
301 
302         /* trim buflen to not go beyond EOF */
303         if (buflen > vmcore_size - *fpos)
304                 buflen = vmcore_size - *fpos;
305 
306         /* Read ELF core header */
307         if (*fpos < elfcorebuf_sz) {
308                 tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
309                 if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
310                         return -EFAULT;
311                 buflen -= tsz;
312                 *fpos += tsz;
313                 buffer += tsz;
314                 acc += tsz;
315 
316                 /* leave now if filled buffer already */
317                 if (buflen == 0)
318                         return acc;
319         }
320 
321         /* Read Elf note segment */
322         if (*fpos < elfcorebuf_sz + elfnotes_sz) {
323                 void *kaddr;
324 
325                 /* We add device dumps before other elf notes because the
326                  * other elf notes may not fill the elf notes buffer
327                  * completely and we will end up with zero-filled data
328                  * between the elf notes and the device dumps. Tools will
329                  * then try to decode this zero-filled data as valid notes
330                  * and we don't want that. Hence, adding device dumps before
 331                  * the other elf notes ensures that zero-filled data can be
332                  * avoided.
333                  */
334 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
335                 /* Read device dumps */
336                 if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
337                         tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
338                                   (size_t)*fpos, buflen);
339                         start = *fpos - elfcorebuf_sz;
340                         if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf))
341                                 return -EFAULT;
342 
343                         buflen -= tsz;
344                         *fpos += tsz;
345                         buffer += tsz;
346                         acc += tsz;
347 
348                         /* leave now if filled buffer already */
349                         if (!buflen)
350                                 return acc;
351                 }
352 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
353 
354                 /* Read remaining elf notes */
355                 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
356                 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
357                 if (copy_to(buffer, kaddr, tsz, userbuf))
358                         return -EFAULT;
359 
360                 buflen -= tsz;
361                 *fpos += tsz;
362                 buffer += tsz;
363                 acc += tsz;
364 
365                 /* leave now if filled buffer already */
366                 if (buflen == 0)
367                         return acc;
368         }
369 
370         list_for_each_entry(m, &vmcore_list, list) {
371                 if (*fpos < m->offset + m->size) {
372                         tsz = (size_t)min_t(unsigned long long,
373                                             m->offset + m->size - *fpos,
374                                             buflen);
375                         start = m->paddr + *fpos - m->offset;
376                         tmp = read_from_oldmem(buffer, tsz, &start,
377                                                userbuf, sme_active());
378                         if (tmp < 0)
379                                 return tmp;
380                         buflen -= tsz;
381                         *fpos += tsz;
382                         buffer += tsz;
383                         acc += tsz;
384 
385                         /* leave now if filled buffer already */
386                         if (buflen == 0)
387                                 return acc;
388                 }
389         }
390 
391         return acc;
392 }
393 
394 static ssize_t read_vmcore(struct file *file, char __user *buffer,
395                            size_t buflen, loff_t *fpos)
396 {
397         return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
398 }
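
/*
 * Editor's note (not part of vmcore.c): __read_vmcore() above walks the
 * logical file layout that the header-parsing code below constructs:
 *
 *   [ ELF header + program headers ]  elfcorebuf,    elfcorebuf_sz bytes
 *   [ device dump notes            ]  vmcoredd_list, vmcoredd_orig_sz bytes
 *   [ merged ELF notes             ]  elfnotes_buf   (rest of elfnotes_sz)
 *   [ old memory (PT_LOAD chunks)  ]  vmcore_list, read via read_from_oldmem()
 *
 * Each region consumes part of the request and the code falls through to the
 * next one until buflen is exhausted or EOF (vmcore_size) is reached.
 */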
399 
400 /*
401  * The vmcore fault handler uses the page cache and fills data using the
 402  * standard __read_vmcore() function.
403  *
404  * On s390 the fault handler is used for memory regions that can't be mapped
405  * directly with remap_pfn_range().
406  */
407 static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
408 {
409 #ifdef CONFIG_S390
410         struct address_space *mapping = vmf->vma->vm_file->f_mapping;
411         pgoff_t index = vmf->pgoff;
412         struct page *page;
413         loff_t offset;
414         char *buf;
415         int rc;
416 
417         page = find_or_create_page(mapping, index, GFP_KERNEL);
418         if (!page)
419                 return VM_FAULT_OOM;
420         if (!PageUptodate(page)) {
421                 offset = (loff_t) index << PAGE_SHIFT;
422                 buf = __va((page_to_pfn(page) << PAGE_SHIFT));
423                 rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
424                 if (rc < 0) {
425                         unlock_page(page);
426                         put_page(page);
427                         return vmf_error(rc);
428                 }
429                 SetPageUptodate(page);
430         }
431         unlock_page(page);
432         vmf->page = page;
433         return 0;
434 #else
435         return VM_FAULT_SIGBUS;
436 #endif
437 }
438 
439 static const struct vm_operations_struct vmcore_mmap_ops = {
440         .fault = mmap_vmcore_fault,
441 };
442 
443 /**
444  * vmcore_alloc_buf - allocate buffer in vmalloc memory
 445  * @size: size of buffer
446  *
447  * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
448  * the buffer to user-space by means of remap_vmalloc_range().
449  *
450  * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
451  * disabled and there's no need to allow users to mmap the buffer.
452  */
453 static inline char *vmcore_alloc_buf(size_t size)
454 {
455 #ifdef CONFIG_MMU
456         return vmalloc_user(size);
457 #else
458         return vzalloc(size);
459 #endif
460 }
461 
462 /*
463  * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
464  * essential for mmap_vmcore() in order to map physically
465  * non-contiguous objects (ELF header, ELF note segment and memory
466  * regions in the 1st kernel pointed to by PT_LOAD entries) into
467  * virtually contiguous user-space in ELF layout.
468  */
469 #ifdef CONFIG_MMU
470 /*
471  * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
472  * reported as not being ram with the zero page.
473  *
474  * @vma: vm_area_struct describing requested mapping
475  * @from: start remapping from
476  * @pfn: page frame number to start remapping to
477  * @size: remapping size
478  * @prot: protection bits
479  *
480  * Returns zero on success, -EAGAIN on failure.
481  */
482 static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
483                                     unsigned long from, unsigned long pfn,
484                                     unsigned long size, pgprot_t prot)
485 {
486         unsigned long map_size;
487         unsigned long pos_start, pos_end, pos;
488         unsigned long zeropage_pfn = my_zero_pfn(0);
489         size_t len = 0;
490 
491         pos_start = pfn;
492         pos_end = pfn + (size >> PAGE_SHIFT);
493 
494         for (pos = pos_start; pos < pos_end; ++pos) {
495                 if (!pfn_is_ram(pos)) {
496                         /*
497                          * We hit a page which is not ram. Remap the continuous
498                          * region between pos_start and pos-1 and replace
499                          * the non-ram page at pos with the zero page.
500                          */
501                         if (pos > pos_start) {
502                                 /* Remap continuous region */
503                                 map_size = (pos - pos_start) << PAGE_SHIFT;
504                                 if (remap_oldmem_pfn_range(vma, from + len,
505                                                            pos_start, map_size,
506                                                            prot))
507                                         goto fail;
508                                 len += map_size;
509                         }
510                         /* Remap the zero page */
511                         if (remap_oldmem_pfn_range(vma, from + len,
512                                                    zeropage_pfn,
513                                                    PAGE_SIZE, prot))
514                                 goto fail;
515                         len += PAGE_SIZE;
516                         pos_start = pos + 1;
517                 }
518         }
519         if (pos > pos_start) {
520                 /* Remap the rest */
521                 map_size = (pos - pos_start) << PAGE_SHIFT;
522                 if (remap_oldmem_pfn_range(vma, from + len, pos_start,
523                                            map_size, prot))
524                         goto fail;
525         }
526         return 0;
527 fail:
528         do_munmap(vma->vm_mm, from, len, NULL);
529         return -EAGAIN;
530 }
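
/*
 * Editor's note (not part of vmcore.c): hypothetical example for the loop
 * above.  Mapping 4 pages starting at pfn P, where only P+2 is reported as
 * not RAM, results in three calls to remap_oldmem_pfn_range():
 *
 *   pages [P, P+1]       -> mapped at vma offset 0
 *   the shared zero page -> mapped in place of P+2
 *   page  P+3            -> mapped after it
 *
 * On any failure the partial mapping of 'len' bytes is torn down with
 * do_munmap() and -EAGAIN is returned.
 */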
531 
532 static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
533                             unsigned long from, unsigned long pfn,
534                             unsigned long size, pgprot_t prot)
535 {
536         /*
537          * Check if oldmem_pfn_is_ram was registered to avoid
538          * looping over all pages without a reason.
539          */
540         if (oldmem_pfn_is_ram)
541                 return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
542         else
543                 return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
544 }
545 
546 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
547 {
548         size_t size = vma->vm_end - vma->vm_start;
549         u64 start, end, len, tsz;
550         struct vmcore *m;
551 
552         start = (u64)vma->vm_pgoff << PAGE_SHIFT;
553         end = start + size;
554 
555         if (size > vmcore_size || end > vmcore_size)
556                 return -EINVAL;
557 
558         if (vma->vm_flags & (VM_WRITE | VM_EXEC))
559                 return -EPERM;
560 
561         vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
562         vma->vm_flags |= VM_MIXEDMAP;
563         vma->vm_ops = &vmcore_mmap_ops;
564 
565         len = 0;
566 
567         if (start < elfcorebuf_sz) {
568                 u64 pfn;
569 
570                 tsz = min(elfcorebuf_sz - (size_t)start, size);
571                 pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
572                 if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
573                                     vma->vm_page_prot))
574                         return -EAGAIN;
575                 size -= tsz;
576                 start += tsz;
577                 len += tsz;
578 
579                 if (size == 0)
580                         return 0;
581         }
582 
583         if (start < elfcorebuf_sz + elfnotes_sz) {
584                 void *kaddr;
585 
586                 /* We add device dumps before other elf notes because the
587                  * other elf notes may not fill the elf notes buffer
588                  * completely and we will end up with zero-filled data
589                  * between the elf notes and the device dumps. Tools will
590                  * then try to decode this zero-filled data as valid notes
591                  * and we don't want that. Hence, adding device dumps before
 592                  * the other elf notes ensures that zero-filled data can be
593                  * avoided. This also ensures that the device dumps and
594                  * other elf notes can be properly mmaped at page aligned
595                  * address.
596                  */
597 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
598                 /* Read device dumps */
599                 if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
600                         u64 start_off;
601 
602                         tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
603                                   (size_t)start, size);
604                         start_off = start - elfcorebuf_sz;
605                         if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
606                                                 start_off, tsz))
607                                 goto fail;
608 
609                         size -= tsz;
610                         start += tsz;
611                         len += tsz;
612 
613                         /* leave now if filled buffer already */
614                         if (!size)
615                                 return 0;
616                 }
617 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
618 
619                 /* Read remaining elf notes */
620                 tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
621                 kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
622                 if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
623                                                 kaddr, tsz))
624                         goto fail;
625 
626                 size -= tsz;
627                 start += tsz;
628                 len += tsz;
629 
630                 if (size == 0)
631                         return 0;
632         }
633 
634         list_for_each_entry(m, &vmcore_list, list) {
635                 if (start < m->offset + m->size) {
636                         u64 paddr = 0;
637 
638                         tsz = (size_t)min_t(unsigned long long,
639                                             m->offset + m->size - start, size);
640                         paddr = m->paddr + start - m->offset;
641                         if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
642                                                     paddr >> PAGE_SHIFT, tsz,
643                                                     vma->vm_page_prot))
644                                 goto fail;
645                         size -= tsz;
646                         start += tsz;
647                         len += tsz;
648 
649                         if (size == 0)
650                                 return 0;
651                 }
652         }
653 
654         return 0;
655 fail:
656         do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
657         return -EAGAIN;
658 }
659 #else
660 static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
661 {
662         return -ENOSYS;
663 }
664 #endif
665 
666 static const struct file_operations proc_vmcore_operations = {
667         .read           = read_vmcore,
668         .llseek         = default_llseek,
669         .mmap           = mmap_vmcore,
670 };
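
/*
 * Editor's illustration (not part of vmcore.c): a hedged userspace sketch of
 * the interface the file_operations above export.  A dump tool may either
 * read() /proc/vmcore sequentially or, with CONFIG_MMU, map it read-only;
 * writable or executable mappings are rejected by mmap_vmcore().
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0)
		return 1;
	void *p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* The image starts with the rebuilt ELF header: \x7fELF... */
	printf("vmcore: %lld bytes, magic %.3s\n",
	       (long long)st.st_size, (char *)p + 1);
	munmap(p, st.st_size);
	return close(fd);
}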
671 
672 static struct vmcore* __init get_new_element(void)
673 {
674         return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
675 }
676 
677 static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
678                            struct list_head *vc_list)
679 {
680         u64 size;
681         struct vmcore *m;
682 
683         size = elfsz + elfnotesegsz;
684         list_for_each_entry(m, vc_list, list) {
685                 size += m->size;
686         }
687         return size;
688 }
689 
690 /**
691  * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
692  *
693  * @ehdr_ptr: ELF header
694  *
695  * This function updates p_memsz member of each PT_NOTE entry in the
696  * program header table pointed to by @ehdr_ptr to real size of ELF
697  * note segment.
698  */
699 static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
700 {
701         int i, rc=0;
702         Elf64_Phdr *phdr_ptr;
703         Elf64_Nhdr *nhdr_ptr;
704 
705         phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
706         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
707                 void *notes_section;
708                 u64 offset, max_sz, sz, real_sz = 0;
709                 if (phdr_ptr->p_type != PT_NOTE)
710                         continue;
711                 max_sz = phdr_ptr->p_memsz;
712                 offset = phdr_ptr->p_offset;
713                 notes_section = kmalloc(max_sz, GFP_KERNEL);
714                 if (!notes_section)
715                         return -ENOMEM;
716                 rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
717                 if (rc < 0) {
718                         kfree(notes_section);
719                         return rc;
720                 }
721                 nhdr_ptr = notes_section;
722                 while (nhdr_ptr->n_namesz != 0) {
723                         sz = sizeof(Elf64_Nhdr) +
724                                 (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
725                                 (((u64)nhdr_ptr->n_descsz + 3) & ~3);
726                         if ((real_sz + sz) > max_sz) {
727                                 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
728                                         nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
729                                 break;
730                         }
731                         real_sz += sz;
732                         nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
733                 }
734                 kfree(notes_section);
735                 phdr_ptr->p_memsz = real_sz;
736                 if (real_sz == 0) {
737                         pr_warn("Warning: Zero PT_NOTE entries found\n");
738                 }
739         }
740 
741         return 0;
742 }
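
/*
 * Editor's note (not part of vmcore.c): each note entry parsed above is
 * sized as header + 4-byte-aligned name + 4-byte-aligned descriptor.  For a
 * hypothetical note with n_namesz = 5 ("CORE\0") and n_descsz = 100:
 *
 *   sz = sizeof(Elf64_Nhdr) + roundup(5, 4) + roundup(100, 4)
 *      = 12 + 8 + 100 = 120 bytes
 *
 * real_sz accumulates these entry sizes until a zero n_namesz or the
 * p_memsz limit is hit, and p_memsz is then trimmed to real_sz.
 */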
743 
744 /**
745  * get_note_number_and_size_elf64 - get the number of PT_NOTE program
746  * headers and sum of real size of their ELF note segment headers and
747  * data.
748  *
749  * @ehdr_ptr: ELF header
750  * @nr_ptnote: buffer for the number of PT_NOTE program headers
751  * @sz_ptnote: buffer for size of unique PT_NOTE program header
752  *
753  * This function is used to merge multiple PT_NOTE program headers
754  * into a unique single one. The resulting unique entry will have
 755  * @sz_ptnote in its phdr->p_memsz.
 756  *
 757  * It is assumed that the PT_NOTE program headers pointed to by
 758  * @ehdr_ptr have already been updated by update_note_header_size_elf64
 759  * and that each PT_NOTE program header has the actual ELF note segment
 760  * size in its p_memsz member.
761  */
762 static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
763                                                  int *nr_ptnote, u64 *sz_ptnote)
764 {
765         int i;
766         Elf64_Phdr *phdr_ptr;
767 
768         *nr_ptnote = *sz_ptnote = 0;
769 
770         phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
771         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
772                 if (phdr_ptr->p_type != PT_NOTE)
773                         continue;
774                 *nr_ptnote += 1;
775                 *sz_ptnote += phdr_ptr->p_memsz;
776         }
777 
778         return 0;
779 }
780 
781 /**
782  * copy_notes_elf64 - copy ELF note segments in a given buffer
783  *
784  * @ehdr_ptr: ELF header
785  * @notes_buf: buffer into which ELF note segments are copied
786  *
 787  * This function is used to copy the ELF note segments of the 1st kernel
 788  * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 789  * size of the buffer @notes_buf is equal to or larger than the sum of the
 790  * real ELF note segment headers and data.
 791  *
 792  * It is assumed that the PT_NOTE program headers pointed to by
 793  * @ehdr_ptr have already been updated by update_note_header_size_elf64
 794  * and that each PT_NOTE program header has the actual ELF note segment
 795  * size in its p_memsz member.
796  */
797 static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
798 {
799         int i, rc=0;
800         Elf64_Phdr *phdr_ptr;
801 
802         phdr_ptr = (Elf64_Phdr*)(ehdr_ptr + 1);
803 
804         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
805                 u64 offset;
806                 if (phdr_ptr->p_type != PT_NOTE)
807                         continue;
808                 offset = phdr_ptr->p_offset;
809                 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
810                                            &offset);
811                 if (rc < 0)
812                         return rc;
813                 notes_buf += phdr_ptr->p_memsz;
814         }
815 
816         return 0;
817 }
818 
819 /* Merges all the PT_NOTE headers into one. */
820 static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
821                                            char **notes_buf, size_t *notes_sz)
822 {
823         int i, nr_ptnote=0, rc=0;
824         char *tmp;
825         Elf64_Ehdr *ehdr_ptr;
826         Elf64_Phdr phdr;
827         u64 phdr_sz = 0, note_off;
828 
829         ehdr_ptr = (Elf64_Ehdr *)elfptr;
830 
831         rc = update_note_header_size_elf64(ehdr_ptr);
832         if (rc < 0)
833                 return rc;
834 
835         rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
836         if (rc < 0)
837                 return rc;
838 
839         *notes_sz = roundup(phdr_sz, PAGE_SIZE);
840         *notes_buf = vmcore_alloc_buf(*notes_sz);
841         if (!*notes_buf)
842                 return -ENOMEM;
843 
844         rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
845         if (rc < 0)
846                 return rc;
847 
848         /* Prepare merged PT_NOTE program header. */
849         phdr.p_type    = PT_NOTE;
850         phdr.p_flags   = 0;
851         note_off = sizeof(Elf64_Ehdr) +
852                         (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
853         phdr.p_offset  = roundup(note_off, PAGE_SIZE);
854         phdr.p_vaddr   = phdr.p_paddr = 0;
855         phdr.p_filesz  = phdr.p_memsz = phdr_sz;
856         phdr.p_align   = 0;
857 
858         /* Add merged PT_NOTE program header*/
859         tmp = elfptr + sizeof(Elf64_Ehdr);
860         memcpy(tmp, &phdr, sizeof(phdr));
861         tmp += sizeof(phdr);
862 
863         /* Remove unwanted PT_NOTE program headers. */
864         i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
865         *elfsz = *elfsz - i;
866         memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
867         memset(elfptr + *elfsz, 0, i);
868         *elfsz = roundup(*elfsz, PAGE_SIZE);
869 
870         /* Modify e_phnum to reflect merged headers. */
871         ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
872 
 873         /* Store the size of all notes.  We need this to update the note
 874          * header when device dumps are added.
 875          */
876         elfnotes_orig_sz = phdr.p_memsz;
877 
878         return 0;
879 }
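
/*
 * Editor's note (not part of vmcore.c): after the merge above, the header
 * buffer holds the original Elf64_Ehdr followed by a single synthesized
 * PT_NOTE entry and the surviving non-note program headers, zero-padded and
 * rounded up to a full page.  The merged note data itself lives separately
 * in *notes_buf (vmalloc memory); phdr.p_offset points at the first
 * page-aligned offset after the header block, which is exactly where the
 * read and mmap paths place elfnotes_buf.
 */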
880 
881 /**
882  * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
883  *
884  * @ehdr_ptr: ELF header
885  *
886  * This function updates p_memsz member of each PT_NOTE entry in the
887  * program header table pointed to by @ehdr_ptr to real size of ELF
888  * note segment.
889  */
890 static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
891 {
892         int i, rc=0;
893         Elf32_Phdr *phdr_ptr;
894         Elf32_Nhdr *nhdr_ptr;
895 
896         phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
897         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
898                 void *notes_section;
899                 u64 offset, max_sz, sz, real_sz = 0;
900                 if (phdr_ptr->p_type != PT_NOTE)
901                         continue;
902                 max_sz = phdr_ptr->p_memsz;
903                 offset = phdr_ptr->p_offset;
904                 notes_section = kmalloc(max_sz, GFP_KERNEL);
905                 if (!notes_section)
906                         return -ENOMEM;
907                 rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
908                 if (rc < 0) {
909                         kfree(notes_section);
910                         return rc;
911                 }
912                 nhdr_ptr = notes_section;
913                 while (nhdr_ptr->n_namesz != 0) {
914                         sz = sizeof(Elf32_Nhdr) +
915                                 (((u64)nhdr_ptr->n_namesz + 3) & ~3) +
916                                 (((u64)nhdr_ptr->n_descsz + 3) & ~3);
917                         if ((real_sz + sz) > max_sz) {
918                                 pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
919                                         nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
920                                 break;
921                         }
922                         real_sz += sz;
923                         nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
924                 }
925                 kfree(notes_section);
926                 phdr_ptr->p_memsz = real_sz;
927                 if (real_sz == 0) {
928                         pr_warn("Warning: Zero PT_NOTE entries found\n");
929                 }
930         }
931 
932         return 0;
933 }
934 
935 /**
936  * get_note_number_and_size_elf32 - get the number of PT_NOTE program
937  * headers and sum of real size of their ELF note segment headers and
938  * data.
939  *
940  * @ehdr_ptr: ELF header
941  * @nr_ptnote: buffer for the number of PT_NOTE program headers
942  * @sz_ptnote: buffer for size of unique PT_NOTE program header
943  *
944  * This function is used to merge multiple PT_NOTE program headers
945  * into a unique single one. The resulting unique entry will have
 946  * @sz_ptnote in its phdr->p_memsz.
 947  *
 948  * It is assumed that the PT_NOTE program headers pointed to by
 949  * @ehdr_ptr have already been updated by update_note_header_size_elf32
 950  * and that each PT_NOTE program header has the actual ELF note segment
 951  * size in its p_memsz member.
952  */
953 static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
954                                                  int *nr_ptnote, u64 *sz_ptnote)
955 {
956         int i;
957         Elf32_Phdr *phdr_ptr;
958 
959         *nr_ptnote = *sz_ptnote = 0;
960 
961         phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
962         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
963                 if (phdr_ptr->p_type != PT_NOTE)
964                         continue;
965                 *nr_ptnote += 1;
966                 *sz_ptnote += phdr_ptr->p_memsz;
967         }
968 
969         return 0;
970 }
971 
972 /**
973  * copy_notes_elf32 - copy ELF note segments in a given buffer
974  *
975  * @ehdr_ptr: ELF header
976  * @notes_buf: buffer into which ELF note segments are copied
977  *
 978  * This function is used to copy the ELF note segments of the 1st kernel
 979  * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 980  * size of the buffer @notes_buf is equal to or larger than the sum of the
 981  * real ELF note segment headers and data.
 982  *
 983  * It is assumed that the PT_NOTE program headers pointed to by
 984  * @ehdr_ptr have already been updated by update_note_header_size_elf32
 985  * and that each PT_NOTE program header has the actual ELF note segment
 986  * size in its p_memsz member.
987  */
988 static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
989 {
990         int i, rc=0;
991         Elf32_Phdr *phdr_ptr;
992 
993         phdr_ptr = (Elf32_Phdr*)(ehdr_ptr + 1);
994 
995         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
996                 u64 offset;
997                 if (phdr_ptr->p_type != PT_NOTE)
998                         continue;
999                 offset = phdr_ptr->p_offset;
1000                 rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
1001                                            &offset);
1002                 if (rc < 0)
1003                         return rc;
1004                 notes_buf += phdr_ptr->p_memsz;
1005         }
1006 
1007         return 0;
1008 }
1009 
1010 /* Merges all the PT_NOTE headers into one. */
1011 static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
1012                                            char **notes_buf, size_t *notes_sz)
1013 {
1014         int i, nr_ptnote=0, rc=0;
1015         char *tmp;
1016         Elf32_Ehdr *ehdr_ptr;
1017         Elf32_Phdr phdr;
1018         u64 phdr_sz = 0, note_off;
1019 
1020         ehdr_ptr = (Elf32_Ehdr *)elfptr;
1021 
1022         rc = update_note_header_size_elf32(ehdr_ptr);
1023         if (rc < 0)
1024                 return rc;
1025 
1026         rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
1027         if (rc < 0)
1028                 return rc;
1029 
1030         *notes_sz = roundup(phdr_sz, PAGE_SIZE);
1031         *notes_buf = vmcore_alloc_buf(*notes_sz);
1032         if (!*notes_buf)
1033                 return -ENOMEM;
1034 
1035         rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
1036         if (rc < 0)
1037                 return rc;
1038 
1039         /* Prepare merged PT_NOTE program header. */
1040         phdr.p_type    = PT_NOTE;
1041         phdr.p_flags   = 0;
1042         note_off = sizeof(Elf32_Ehdr) +
1043                         (ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
1044         phdr.p_offset  = roundup(note_off, PAGE_SIZE);
1045         phdr.p_vaddr   = phdr.p_paddr = 0;
1046         phdr.p_filesz  = phdr.p_memsz = phdr_sz;
1047         phdr.p_align   = 0;
1048 
1049         /* Add merged PT_NOTE program header*/
1050         tmp = elfptr + sizeof(Elf32_Ehdr);
1051         memcpy(tmp, &phdr, sizeof(phdr));
1052         tmp += sizeof(phdr);
1053 
1054         /* Remove unwanted PT_NOTE program headers. */
1055         i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
1056         *elfsz = *elfsz - i;
1057         memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
1058         memset(elfptr + *elfsz, 0, i);
1059         *elfsz = roundup(*elfsz, PAGE_SIZE);
1060 
1061         /* Modify e_phnum to reflect merged headers. */
1062         ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;
1063 
1064         /* Store the size of all notes.  We need this to update the note
1065          * header when device dumps are added.
1066          */
1067         elfnotes_orig_sz = phdr.p_memsz;
1068 
1069         return 0;
1070 }
1071 
1072 /* Add memory chunks represented by program headers to vmcore list. Also update
1073  * the new offset fields of exported program headers. */
1074 static int __init process_ptload_program_headers_elf64(char *elfptr,
1075                                                 size_t elfsz,
1076                                                 size_t elfnotes_sz,
1077                                                 struct list_head *vc_list)
1078 {
1079         int i;
1080         Elf64_Ehdr *ehdr_ptr;
1081         Elf64_Phdr *phdr_ptr;
1082         loff_t vmcore_off;
1083         struct vmcore *new;
1084 
1085         ehdr_ptr = (Elf64_Ehdr *)elfptr;
1086         phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */
1087 
1088         /* Skip Elf header, program headers and Elf note segment. */
1089         vmcore_off = elfsz + elfnotes_sz;
1090 
1091         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1092                 u64 paddr, start, end, size;
1093 
1094                 if (phdr_ptr->p_type != PT_LOAD)
1095                         continue;
1096 
1097                 paddr = phdr_ptr->p_offset;
1098                 start = rounddown(paddr, PAGE_SIZE);
1099                 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1100                 size = end - start;
1101 
1102                 /* Add this contiguous chunk of memory to vmcore list.*/
1103                 new = get_new_element();
1104                 if (!new)
1105                         return -ENOMEM;
1106                 new->paddr = start;
1107                 new->size = size;
1108                 list_add_tail(&new->list, vc_list);
1109 
1110                 /* Update the program header offset. */
1111                 phdr_ptr->p_offset = vmcore_off + (paddr - start);
1112                 vmcore_off = vmcore_off + size;
1113         }
1114         return 0;
1115 }
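
/*
 * Editor's note (not part of vmcore.c): hypothetical example for the loop
 * above.  A PT_LOAD entry whose physical start (stored in p_offset by the
 * crash kernel) is 0x100500 with p_memsz = 0x2300 becomes the chunk
 *
 *   paddr = 0x100000, size = 0x3000   (page-rounded both ways)
 *
 * on vmcore_list, and its file offset is rewritten to vmcore_off + 0x500 so
 * that the sub-page displacement is preserved.
 */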
1116 
1117 static int __init process_ptload_program_headers_elf32(char *elfptr,
1118                                                 size_t elfsz,
1119                                                 size_t elfnotes_sz,
1120                                                 struct list_head *vc_list)
1121 {
1122         int i;
1123         Elf32_Ehdr *ehdr_ptr;
1124         Elf32_Phdr *phdr_ptr;
1125         loff_t vmcore_off;
1126         struct vmcore *new;
1127 
1128         ehdr_ptr = (Elf32_Ehdr *)elfptr;
1129         phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */
1130 
1131         /* Skip Elf header, program headers and Elf note segment. */
1132         vmcore_off = elfsz + elfnotes_sz;
1133 
1134         for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
1135                 u64 paddr, start, end, size;
1136 
1137                 if (phdr_ptr->p_type != PT_LOAD)
1138                         continue;
1139 
1140                 paddr = phdr_ptr->p_offset;
1141                 start = rounddown(paddr, PAGE_SIZE);
1142                 end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
1143                 size = end - start;
1144 
1145                 /* Add this contiguous chunk of memory to vmcore list.*/
1146                 new = get_new_element();
1147                 if (!new)
1148                         return -ENOMEM;
1149                 new->paddr = start;
1150                 new->size = size;
1151                 list_add_tail(&new->list, vc_list);
1152 
1153                 /* Update the program header offset */
1154                 phdr_ptr->p_offset = vmcore_off + (paddr - start);
1155                 vmcore_off = vmcore_off + size;
1156         }
1157         return 0;
1158 }
1159 
1160 /* Sets offset fields of vmcore elements. */
1161 static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
1162                                     struct list_head *vc_list)
1163 {
1164         loff_t vmcore_off;
1165         struct vmcore *m;
1166 
1167         /* Skip Elf header, program headers and Elf note segment. */
1168         vmcore_off = elfsz + elfnotes_sz;
1169 
1170         list_for_each_entry(m, vc_list, list) {
1171                 m->offset = vmcore_off;
1172                 vmcore_off += m->size;
1173         }
1174 }
1175 
1176 static void free_elfcorebuf(void)
1177 {
1178         free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
1179         elfcorebuf = NULL;
1180         vfree(elfnotes_buf);
1181         elfnotes_buf = NULL;
1182 }
1183 
1184 static int __init parse_crash_elf64_headers(void)
1185 {
1186         int rc=0;
1187         Elf64_Ehdr ehdr;
1188         u64 addr;
1189 
1190         addr = elfcorehdr_addr;
1191 
1192         /* Read Elf header */
1193         rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
1194         if (rc < 0)
1195                 return rc;
1196 
1197         /* Do some basic Verification. */
1198         if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1199                 (ehdr.e_type != ET_CORE) ||
1200                 !vmcore_elf64_check_arch(&ehdr) ||
1201                 ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
1202                 ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1203                 ehdr.e_version != EV_CURRENT ||
1204                 ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
1205                 ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
1206                 ehdr.e_phnum == 0) {
1207                 pr_warn("Warning: Core image elf header is not sane\n");
1208                 return -EINVAL;
1209         }
1210 
1211         /* Read in all elf headers. */
1212         elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
1213                                 ehdr.e_phnum * sizeof(Elf64_Phdr);
1214         elfcorebuf_sz = elfcorebuf_sz_orig;
1215         elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1216                                               get_order(elfcorebuf_sz_orig));
1217         if (!elfcorebuf)
1218                 return -ENOMEM;
1219         addr = elfcorehdr_addr;
1220         rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1221         if (rc < 0)
1222                 goto fail;
1223 
1224         /* Merge all PT_NOTE headers into one. */
1225         rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
1226                                       &elfnotes_buf, &elfnotes_sz);
1227         if (rc)
1228                 goto fail;
1229         rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
1230                                                   elfnotes_sz, &vmcore_list);
1231         if (rc)
1232                 goto fail;
1233         set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1234         return 0;
1235 fail:
1236         free_elfcorebuf();
1237         return rc;
1238 }
1239 
1240 static int __init parse_crash_elf32_headers(void)
1241 {
1242         int rc=0;
1243         Elf32_Ehdr ehdr;
1244         u64 addr;
1245 
1246         addr = elfcorehdr_addr;
1247 
1248         /* Read Elf header */
1249         rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
1250         if (rc < 0)
1251                 return rc;
1252 
1253         /* Do some basic Verification. */
1254         if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
1255                 (ehdr.e_type != ET_CORE) ||
1256                 !vmcore_elf32_check_arch(&ehdr) ||
1257                 ehdr.e_ident[EI_CLASS] != ELFCLASS32||
1258                 ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
1259                 ehdr.e_version != EV_CURRENT ||
1260                 ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
1261                 ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
1262                 ehdr.e_phnum == 0) {
1263                 pr_warn("Warning: Core image elf header is not sane\n");
1264                 return -EINVAL;
1265         }
1266 
1267         /* Read in all elf headers. */
1268         elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
1269         elfcorebuf_sz = elfcorebuf_sz_orig;
1270         elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1271                                               get_order(elfcorebuf_sz_orig));
1272         if (!elfcorebuf)
1273                 return -ENOMEM;
1274         addr = elfcorehdr_addr;
1275         rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
1276         if (rc < 0)
1277                 goto fail;
1278 
1279         /* Merge all PT_NOTE headers into one. */
1280         rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
1281                                       &elfnotes_buf, &elfnotes_sz);
1282         if (rc)
1283                 goto fail;
1284         rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
1285                                                   elfnotes_sz, &vmcore_list);
1286         if (rc)
1287                 goto fail;
1288         set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1289         return 0;
1290 fail:
1291         free_elfcorebuf();
1292         return rc;
1293 }
1294 
1295 static int __init parse_crash_elf_headers(void)
1296 {
1297         unsigned char e_ident[EI_NIDENT];
1298         u64 addr;
1299         int rc=0;
1300 
1301         addr = elfcorehdr_addr;
1302         rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
1303         if (rc < 0)
1304                 return rc;
1305         if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
1306                 pr_warn("Warning: Core image elf header not found\n");
1307                 return -EINVAL;
1308         }
1309 
1310         if (e_ident[EI_CLASS] == ELFCLASS64) {
1311                 rc = parse_crash_elf64_headers();
1312                 if (rc)
1313                         return rc;
1314         } else if (e_ident[EI_CLASS] == ELFCLASS32) {
1315                 rc = parse_crash_elf32_headers();
1316                 if (rc)
1317                         return rc;
1318         } else {
1319                 pr_warn("Warning: Core image elf header is not sane\n");
1320                 return -EINVAL;
1321         }
1322 
1323         /* Determine vmcore size. */
1324         vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1325                                       &vmcore_list);
1326 
1327         return 0;
1328 }
1329 
1330 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1331 /**
1332  * vmcoredd_write_header - Write vmcore device dump header at the
1333  * beginning of the dump's buffer.
1334  * @buf: Output buffer where the note is written
1335  * @data: Dump info
1336  * @size: Size of the dump
1337  *
1338  * Fills beginning of the dump's buffer with vmcore device dump header.
1339  */
1340 static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
1341                                   u32 size)
1342 {
1343         struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;
1344 
1345         vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
1346         vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
1347         vdd_hdr->n_type = NT_VMCOREDD;
1348 
1349         strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
1350                 sizeof(vdd_hdr->name));
1351         memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
1352 }
1353 
1354 /**
1355  * vmcoredd_update_program_headers - Update all Elf program headers
1356  * @elfptr: Pointer to elf header
1357  * @elfnotesz: Size of elf notes aligned to page size
1358  * @vmcoreddsz: Size of device dumps to be added to elf note header
1359  *
1360  * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
1361  * Also update the offsets of all the program headers after the elf note header.
1362  */
1363 static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
1364                                             size_t vmcoreddsz)
1365 {
1366         unsigned char *e_ident = (unsigned char *)elfptr;
1367         u64 start, end, size;
1368         loff_t vmcore_off;
1369         u32 i;
1370 
1371         vmcore_off = elfcorebuf_sz + elfnotesz;
1372 
1373         if (e_ident[EI_CLASS] == ELFCLASS64) {
1374                 Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
1375                 Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
1376 
1377                 /* Update all program headers */
1378                 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1379                         if (phdr->p_type == PT_NOTE) {
1380                                 /* Update note size */
1381                                 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1382                                 phdr->p_filesz = phdr->p_memsz;
1383                                 continue;
1384                         }
1385 
1386                         start = rounddown(phdr->p_offset, PAGE_SIZE);
1387                         end = roundup(phdr->p_offset + phdr->p_memsz,
1388                                       PAGE_SIZE);
1389                         size = end - start;
1390                         phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1391                         vmcore_off += size;
1392                 }
1393         } else {
1394                 Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
1395                 Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
1396 
1397                 /* Update all program headers */
1398                 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1399                         if (phdr->p_type == PT_NOTE) {
1400                                 /* Update note size */
1401                                 phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
1402                                 phdr->p_filesz = phdr->p_memsz;
1403                                 continue;
1404                         }
1405 
1406                         start = rounddown(phdr->p_offset, PAGE_SIZE);
1407                         end = roundup(phdr->p_offset + phdr->p_memsz,
1408                                       PAGE_SIZE);
1409                         size = end - start;
1410                         phdr->p_offset = vmcore_off + (phdr->p_offset - start);
1411                         vmcore_off += size;
1412                 }
1413         }
1414 }
1415 
1416 /**
1417  * vmcoredd_update_size - Update the total size of the device dumps and update
1418  * Elf header
1419  * @dump_size: Size of the current device dump to be added to total size
1420  *
1421  * Update the total size of all the device dumps and update the Elf program
1422  * headers. Calculate the new offsets for the vmcore list and update the
1423  * total vmcore size.
1424  */
1425 static void vmcoredd_update_size(size_t dump_size)
1426 {
1427         vmcoredd_orig_sz += dump_size;
1428         elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
1429         vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
1430                                         vmcoredd_orig_sz);
1431 
1432         /* Update vmcore list offsets */
1433         set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
1434 
1435         vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
1436                                       &vmcore_list);
1437         proc_vmcore->size = vmcore_size;
1438 }
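
/*
 * Editor's note (not part of vmcore.c): every device dump buffer is a page
 * multiple (see vmcore_add_device_dump() below), so the new
 * elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz
 * stays page aligned and the PT_LOAD file offsets recomputed above keep
 * their page alignment as well.
 */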
1439 
1440 /**
1441  * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
1442  * @data: dump info.
1443  *
1444  * Allocate a buffer and invoke the calling driver's dump collect routine.
1445  * Write Elf note at the beginning of the buffer to indicate vmcore device
1446  * dump and add the dump to global list.
1447  */
1448 int vmcore_add_device_dump(struct vmcoredd_data *data)
1449 {
1450         struct vmcoredd_node *dump;
1451         void *buf = NULL;
1452         size_t data_size;
1453         int ret;
1454 
1455         if (!data || !strlen(data->dump_name) ||
1456             !data->vmcoredd_callback || !data->size)
1457                 return -EINVAL;
1458 
1459         dump = vzalloc(sizeof(*dump));
1460         if (!dump) {
1461                 ret = -ENOMEM;
1462                 goto out_err;
1463         }
1464 
1465         /* Keep size of the buffer page aligned so that it can be mmaped */
1466         data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
1467                             PAGE_SIZE);
1468 
1469         /* Allocate buffer for drivers to write their dumps */
1470         buf = vmcore_alloc_buf(data_size);
1471         if (!buf) {
1472                 ret = -ENOMEM;
1473                 goto out_err;
1474         }
1475 
1476         vmcoredd_write_header(buf, data, data_size -
1477                               sizeof(struct vmcoredd_header));
1478 
1479         /* Invoke the driver's dump collection routine */
1480         ret = data->vmcoredd_callback(data, buf +
1481                                       sizeof(struct vmcoredd_header));
1482         if (ret)
1483                 goto out_err;
1484 
1485         dump->buf = buf;
1486         dump->size = data_size;
1487 
1488         /* Add the dump to driver sysfs list */
1489         mutex_lock(&vmcoredd_mutex);
1490         list_add_tail(&dump->list, &vmcoredd_list);
1491         mutex_unlock(&vmcoredd_mutex);
1492 
1493         vmcoredd_update_size(data_size);
1494         return 0;
1495 
1496 out_err:
1497         if (buf)
1498                 vfree(buf);
1499 
1500         if (dump)
1501                 vfree(dump);
1502 
1503         return ret;
1504 }
1505 EXPORT_SYMBOL(vmcore_add_device_dump);
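
/*
 * Editor's illustration (not part of vmcore.c): a hedged sketch of how a
 * driver running in the crash kernel can attach a device dump through the
 * export above.  MY_DUMP_SIZE and the zero-filled payload are hypothetical
 * placeholders for the driver's real collection logic (the in-tree user of
 * this interface is, for example, the cxgb4 driver).
 */
#include <linux/crash_dump.h>
#include <linux/string.h>

#define MY_DUMP_SIZE	(64 * 1024)

static int my_collect(struct vmcoredd_data *data, void *buf)
{
	/* Fill up to data->size bytes of device state into buf */
	memset(buf, 0, data->size);
	return 0;
}

static int my_add_device_dump(void)
{
	struct vmcoredd_data data = {};

	strscpy(data.dump_name, "my_device", sizeof(data.dump_name));
	data.size = MY_DUMP_SIZE;
	data.vmcoredd_callback = my_collect;
	return vmcore_add_device_dump(&data);	/* 0 on success */
}
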
1506 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1507 
1508 /* Free all dumps in vmcore device dump list */
1509 static void vmcore_free_device_dumps(void)
1510 {
1511 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1512         mutex_lock(&vmcoredd_mutex);
1513         while (!list_empty(&vmcoredd_list)) {
1514                 struct vmcoredd_node *dump;
1515 
1516                 dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
1517                                         list);
1518                 list_del(&dump->list);
1519                 vfree(dump->buf);
1520                 vfree(dump);
1521         }
1522         mutex_unlock(&vmcoredd_mutex);
1523 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
1524 }
1525 
1526 /* Init function for vmcore module. */
1527 static int __init vmcore_init(void)
1528 {
1529         int rc = 0;
1530 
1531         /* Allow architectures to allocate ELF header in 2nd kernel */
1532         rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
1533         if (rc)
1534                 return rc;
1535         /*
1536          * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
1537          * then capture the dump.
1538          */
1539         if (!(is_vmcore_usable()))
1540                 return rc;
1541         rc = parse_crash_elf_headers();
1542         if (rc) {
1543                 pr_warn("Kdump: vmcore not initialized\n");
1544                 return rc;
1545         }
1546         elfcorehdr_free(elfcorehdr_addr);
1547         elfcorehdr_addr = ELFCORE_ADDR_ERR;
1548 
1549         proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
1550         if (proc_vmcore)
1551                 proc_vmcore->size = vmcore_size;
1552         return 0;
1553 }
1554 fs_initcall(vmcore_init);
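
/*
 * Editor's note (not part of vmcore.c): is_vmcore_usable() above only
 * succeeds in a kdump (2nd) kernel, i.e. when elfcorehdr_addr has been set,
 * either by the architecture's elfcorehdr_alloc() or via the elfcorehdr=
 * boot parameter typically appended by the kexec tooling to the crash
 * kernel's command line.  In a normally booted kernel, vmcore_init()
 * returns early and /proc/vmcore is not created.
 */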
1555 
1556 /* Cleanup function for vmcore module. */
1557 void vmcore_cleanup(void)
1558 {
1559         if (proc_vmcore) {
1560                 proc_remove(proc_vmcore);
1561                 proc_vmcore = NULL;
1562         }
1563 
1564         /* clear the vmcore list. */
1565         while (!list_empty(&vmcore_list)) {
1566                 struct vmcore *m;
1567 
1568                 m = list_first_entry(&vmcore_list, struct vmcore, list);
1569                 list_del(&m->list);
1570                 kfree(m);
1571         }
1572         free_elfcorebuf();
1573 
1574         /* clear vmcore device dump list */
1575         vmcore_free_device_dumps();
1576 }
1577 
