The crash utility has not supported Xen dom0 and domU dumpfiles
since this Linux 3.19 commit:

  commit 054954eb051f35e74b75a566a96fe756015352c8
  xen: switch to linear virtual mapped sparse p2m list

This patch resurrects support for dom0 dumpfiles only.  Without the
patch, the crash session fails during session initialization with the
message "crash: cannot resolve p2m_top".
(daniel.kiper@oracle.com)
This commit is contained in:
Dave Anderson 2016-01-26 10:18:09 -05:00
parent 6f1f78e334
commit c341345659
3 changed files with 77 additions and 9 deletions

View File

@ -17,6 +17,7 @@
#include "defs.h" #include "defs.h"
#include "xen_hyper_defs.h" #include "xen_hyper_defs.h"
#include "xen_dom0.h"
#include <elf.h> #include <elf.h>
#include <libgen.h> #include <libgen.h>
#include <ctype.h> #include <ctype.h>
@ -61,6 +62,7 @@ static int restore_stack(struct bt_info *);
static ulong __xen_m2p(ulonglong, ulong); static ulong __xen_m2p(ulonglong, ulong);
static ulong __xen_pvops_m2p_l2(ulonglong, ulong); static ulong __xen_pvops_m2p_l2(ulonglong, ulong);
static ulong __xen_pvops_m2p_l3(ulonglong, ulong); static ulong __xen_pvops_m2p_l3(ulonglong, ulong);
static ulong __xen_pvops_m2p_hyper(ulonglong, ulong);
static int search_mapping_page(ulong, ulong *, ulong *, ulong *); static int search_mapping_page(ulong, ulong *, ulong *, ulong *);
static void read_in_kernel_config_err(int, char *); static void read_in_kernel_config_err(int, char *);
static void BUG_bytes_init(void); static void BUG_bytes_init(void);
@ -175,6 +177,9 @@ kernel_init()
&kt->pvops_xen.p2m_mid_missing); &kt->pvops_xen.p2m_mid_missing);
get_symbol_data("p2m_missing", sizeof(ulong), get_symbol_data("p2m_missing", sizeof(ulong),
&kt->pvops_xen.p2m_missing); &kt->pvops_xen.p2m_missing);
} else if (symbol_exists("xen_p2m_addr")) {
if (!XEN_CORE_DUMPFILE())
error(FATAL, "p2m array in new format is unreadable.");
} else { } else {
kt->pvops_xen.p2m_top_entries = get_array_length("p2m_top", NULL, 0); kt->pvops_xen.p2m_top_entries = get_array_length("p2m_top", NULL, 0);
kt->pvops_xen.p2m_top = symbol_value("p2m_top"); kt->pvops_xen.p2m_top = symbol_value("p2m_top");
@ -5850,12 +5855,14 @@ no_cpu_flags:
else else
fprintf(fp, "\n"); fprintf(fp, "\n");
fprintf(fp, " pvops_xen:\n"); if (!symbol_exists("xen_p2m_addr")) {
fprintf(fp, " p2m_top: %lx\n", kt->pvops_xen.p2m_top); fprintf(fp, " pvops_xen:\n");
fprintf(fp, " p2m_top_entries: %d\n", kt->pvops_xen.p2m_top_entries); fprintf(fp, " p2m_top: %lx\n", kt->pvops_xen.p2m_top);
if (symbol_exists("p2m_mid_missing")) fprintf(fp, " p2m_top_entries: %d\n", kt->pvops_xen.p2m_top_entries);
fprintf(fp, " p2m_mid_missing: %lx\n", kt->pvops_xen.p2m_mid_missing); if (symbol_exists("p2m_mid_missing"))
fprintf(fp, " p2m_missing: %lx\n", kt->pvops_xen.p2m_missing); fprintf(fp, " p2m_mid_missing: %lx\n", kt->pvops_xen.p2m_mid_missing);
fprintf(fp, " p2m_missing: %lx\n", kt->pvops_xen.p2m_missing);
}
} }
/* /*
@ -8873,6 +8880,12 @@ __xen_m2p(ulonglong machine, ulong mfn)
ulong c, i, kmfn, mapping, p, pfn; ulong c, i, kmfn, mapping, p, pfn;
ulong start, end; ulong start, end;
ulong *mp = (ulong *)kt->m2p_page; ulong *mp = (ulong *)kt->m2p_page;
int memtype;
if (XEN_CORE_DUMPFILE() && symbol_exists("xen_p2m_addr"))
memtype = PHYSADDR;
else
memtype = KVADDR;
/* /*
* Check the FIFO cache first. * Check the FIFO cache first.
@ -8883,13 +8896,19 @@ __xen_m2p(ulonglong machine, ulong mfn)
(mfn <= kt->p2m_mapping_cache[c].end))) { (mfn <= kt->p2m_mapping_cache[c].end))) {
if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) { if (kt->p2m_mapping_cache[c].mapping != kt->last_mapping_read) {
if (!readmem(kt->p2m_mapping_cache[c].mapping, KVADDR, if (memtype == PHYSADDR)
pc->curcmd_flags |= XEN_MACHINE_ADDR;
if (!readmem(kt->p2m_mapping_cache[c].mapping, memtype,
mp, PAGESIZE(), "phys_to_machine_mapping page (cached)", mp, PAGESIZE(), "phys_to_machine_mapping page (cached)",
RETURN_ON_ERROR)) RETURN_ON_ERROR))
error(FATAL, "cannot access " error(FATAL, "cannot access "
"phys_to_machine_mapping page\n"); "phys_to_machine_mapping page\n");
else else
kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping; kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping;
if (memtype == PHYSADDR)
pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
} else } else
kt->p2m_page_cache_hits++; kt->p2m_page_cache_hits++;
@ -8919,11 +8938,13 @@ __xen_m2p(ulonglong machine, ulong mfn)
if (PVOPS_XEN()) { if (PVOPS_XEN()) {
/* /*
* The machine address was not cached, so search from the * The machine address was not cached, so search from the
* beginning of the p2m_top array, caching the contiguous * beginning of the p2m tree/array, caching the contiguous
* range containing the found machine address. * range containing the found machine address.
*/ */
if (symbol_exists("p2m_mid_missing")) if (symbol_exists("p2m_mid_missing"))
pfn = __xen_pvops_m2p_l3(machine, mfn); pfn = __xen_pvops_m2p_l3(machine, mfn);
else if (symbol_exists("xen_p2m_addr"))
pfn = __xen_pvops_m2p_hyper(machine, mfn);
else else
pfn = __xen_pvops_m2p_l2(machine, mfn); pfn = __xen_pvops_m2p_l2(machine, mfn);
@ -9088,6 +9109,50 @@ __xen_pvops_m2p_l3(ulonglong machine, ulong mfn)
return XEN_MFN_NOT_FOUND; return XEN_MFN_NOT_FOUND;
} }
/*
 * Translate a machine frame number (mfn) back to its pseudo-physical
 * frame number (pfn) for a Xen dom0 dumpfile on kernels where the p2m
 * data is only reachable via "xen_p2m_addr" (Linux 3.19+ linear p2m
 * list).  Instead of walking kernel p2m_top/p2m_mid structures, this
 * walks the hypervisor-supplied frame list (xkd->p2m_mfn_frame_list),
 * reading one mapping page at a time and searching it for the mfn.
 * On a hit, the containing contiguous range is cached in
 * kt->p2m_mapping_cache for the FIFO-cache fast path in __xen_m2p().
 *
 *   machine: full machine address (used only for debug output here)
 *   mfn:     machine frame number to look up
 *
 * Returns the pfn, or XEN_MFN_NOT_FOUND if the mfn does not appear in
 * any p2m mapping page.
 */
static ulong
__xen_pvops_m2p_hyper(ulonglong machine, ulong mfn)
{
ulong c, end, i, mapping, p, pfn, start;
/* Walk every page of the p2m list provided by the hypervisor. */
for (p = 0;
p < xkd->p2m_frames;
++p) {
/* Machine address of the p-th p2m mapping page. */
mapping = PTOB(xkd->p2m_mfn_frame_list[p]);
/*
 * Read the mapping page into the kt->m2p_page buffer unless it
 * is the one already cached there from the previous read.
 */
if (mapping != kt->last_mapping_read) {
/*
 * XEN_MACHINE_ADDR tells readmem() to treat the PHYSADDR as
 * a Xen machine address; clear it again immediately after.
 */
pc->curcmd_flags |= XEN_MACHINE_ADDR;
if (!readmem(mapping, PHYSADDR, (void *)kt->m2p_page,
PAGESIZE(), "p2m_mfn_frame_list page", RETURN_ON_ERROR))
error(FATAL, "cannot access p2m_mfn_frame_list[] page\n");
pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
kt->last_mapping_read = mapping;
}
kt->p2m_pages_searched++;
/*
 * search_mapping_page() scans the current mapping page for mfn;
 * on success it returns the index i within the page plus the
 * surrounding contiguous mfn range [start, end].
 */
if (search_mapping_page(mfn, &i, &start, &end)) {
/* pfn = (pages preceding this one) * entries-per-page + index. */
pfn = p * XEN_PFNS_PER_PAGE + i;
if (CRASHDEBUG(1))
console("pages: %d mfn: %lx (%llx) p: %ld"
" i: %ld pfn: %lx (%llx)\n", p + 1, mfn, machine,
p, i, pfn, XEN_PFN_TO_PSEUDO(pfn));
/*
 * Record the contiguous range in the rotating FIFO cache so
 * subsequent lookups in this range avoid the page walk.
 */
c = kt->p2m_cache_index;
kt->p2m_mapping_cache[c].start = start;
kt->p2m_mapping_cache[c].end = end;
kt->p2m_mapping_cache[c].mapping = mapping;
kt->p2m_mapping_cache[c].pfn = p * XEN_PFNS_PER_PAGE;
kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE;
return pfn;
}
}
return XEN_MFN_NOT_FOUND;
}
/* /*
* Search for an mfn in the current mapping page, and if found, * Search for an mfn in the current mapping page, and if found,
* determine the range of contiguous mfns that it's contained * determine the range of contiguous mfns that it's contained

View File

@ -20,7 +20,8 @@
#include "xen_dom0.h" #include "xen_dom0.h"
static struct xen_kdump_data xen_kdump_data = { 0 }; static struct xen_kdump_data xen_kdump_data = { 0 };
static struct xen_kdump_data *xkd = &xen_kdump_data;
struct xen_kdump_data *xkd = &xen_kdump_data;
void void
dump_xen_kdump_data(FILE *fp) dump_xen_kdump_data(FILE *fp)

View File

@ -68,6 +68,8 @@ struct xen_kdump_data {
#define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL)) #define P2M_FAILURE ((physaddr_t)(0xffffffffffffffffLL))
extern struct xen_kdump_data *xkd;
void dump_xen_kdump_data(FILE *); void dump_xen_kdump_data(FILE *);
struct xen_kdump_data *get_xen_kdump_data(void); struct xen_kdump_data *get_xen_kdump_data(void);