Crash 7.1.5 commit c341345659 (xen: Add

support for dom0 with Linux kernel 3.19 and newer) from Daniel Kiper
implemented support for Xen dom0 vmcores after Linux 3.19 kernel
commit 054954eb051f35e74b75a566a96fe756015352c8 (xen: switch to
linear virtual mapped sparse p2m list).  This patch can be deemed
subsequent to Daniel's patch, and implements support Xen PV domU
dumpfiles for Linux 3.19 and later kernels.
(honglei.wang@oracle com)
This commit is contained in:
Dave Anderson 2017-05-24 11:50:35 -04:00
parent 8fef88b2b5
commit 5c52842a58
3 changed files with 140 additions and 15 deletions

151
kernel.c
View File

@ -22,6 +22,7 @@
#include <libgen.h>
#include <ctype.h>
#include <stdbool.h>
#include "xendump.h"
static void do_module_cmd(ulong, char *, ulong, char *, char *);
static void show_module_taint(void);
@ -67,6 +68,9 @@ static ulong __xen_m2p(ulonglong, ulong);
static ulong __xen_pvops_m2p_l2(ulonglong, ulong);
static ulong __xen_pvops_m2p_l3(ulonglong, ulong);
static ulong __xen_pvops_m2p_hyper(ulonglong, ulong);
static ulong __xen_pvops_m2p_domU(ulonglong, ulong);
static int read_xc_p2m(ulonglong, void *, long);
static void read_p2m(ulong, int, void *);
static int search_mapping_page(ulong, ulong *, ulong *, ulong *);
static void read_in_kernel_config_err(int, char *);
static void BUG_bytes_init(void);
@ -183,10 +187,7 @@ kernel_init()
&kt->pvops_xen.p2m_mid_missing);
get_symbol_data("p2m_missing", sizeof(ulong),
&kt->pvops_xen.p2m_missing);
} else if (symbol_exists("xen_p2m_addr")) {
if (!XEN_CORE_DUMPFILE())
error(FATAL, "p2m array in new format is unreadable.");
} else {
} else if (!symbol_exists("xen_p2m_addr")) {
kt->pvops_xen.p2m_top_entries = get_array_length("p2m_top", NULL, 0);
kt->pvops_xen.p2m_top = symbol_value("p2m_top");
kt->pvops_xen.p2m_missing = symbol_value("p2m_missing");
@ -9356,13 +9357,7 @@ __xen_m2p(ulonglong machine, ulong mfn)
if (memtype == PHYSADDR)
pc->curcmd_flags |= XEN_MACHINE_ADDR;
if (!readmem(kt->p2m_mapping_cache[c].mapping, memtype,
mp, PAGESIZE(), "phys_to_machine_mapping page (cached)",
RETURN_ON_ERROR))
error(FATAL, "cannot access "
"phys_to_machine_mapping page\n");
else
kt->last_mapping_read = kt->p2m_mapping_cache[c].mapping;
read_p2m(c, memtype, mp);
if (memtype == PHYSADDR)
pc->curcmd_flags &= ~XEN_MACHINE_ADDR;
@ -9400,9 +9395,12 @@ __xen_m2p(ulonglong machine, ulong mfn)
*/
if (symbol_exists("p2m_mid_missing"))
pfn = __xen_pvops_m2p_l3(machine, mfn);
else if (symbol_exists("xen_p2m_addr"))
pfn = __xen_pvops_m2p_hyper(machine, mfn);
else
else if (symbol_exists("xen_p2m_addr")) {
if (XEN_CORE_DUMPFILE())
pfn = __xen_pvops_m2p_hyper(machine, mfn);
else
pfn = __xen_pvops_m2p_domU(machine, mfn);
} else
pfn = __xen_pvops_m2p_l2(machine, mfn);
if (pfn != XEN_MFN_NOT_FOUND)
@ -9610,6 +9608,131 @@ __xen_pvops_m2p_hyper(ulonglong machine, ulong mfn)
return XEN_MFN_NOT_FOUND;
}
static void read_p2m(ulong cache_index, int memtype, void *buffer)
{
/*
* Use special read function for PV domain p2m reading.
* See the comments of read_xc_p2m().
*/
if (symbol_exists("xen_p2m_addr") && !XEN_CORE_DUMPFILE()) {
if (!read_xc_p2m(kt->p2m_mapping_cache[cache_index].mapping,
buffer, PAGESIZE()))
error(FATAL, "cannot access phys_to_machine_mapping page\n");
} else if (!readmem(kt->p2m_mapping_cache[cache_index].mapping, memtype,
buffer, PAGESIZE(), "phys_to_machine_mapping page (cached)",
RETURN_ON_ERROR))
error(FATAL, "cannot access phys_to_machine_mapping page\n");
kt->last_mapping_read = kt->p2m_mapping_cache[cache_index].mapping;
}
/*
* PV domain p2m mapping info is stored in xd->xfd at xch_index_offset. It
* is organized as struct xen_dumpcore_p2m and the pfns are progressively
* increased by 1 from 0.
*
* This is a special p2m reading function for xen PV domain vmcores after
* kernel commit 054954eb051f35e74b75a566a96fe756015352c8 (xen: switch
* to linear virtual mapped sparse p2m list). It is invoked for reading
* p2m associate stuff by read_p2m().
*/
static int read_xc_p2m(ulonglong addr, void *buffer, long size)
{
ulong i, new_p2m_buf_size;
off_t offset;
struct xen_dumpcore_p2m *new_p2m_buf;
static struct xen_dumpcore_p2m *p2m_buf;
static ulong p2m_buf_size = 0;
if (size <= 0) {
if ((CRASHDEBUG(1) && !STREQ(pc->curcmd, "search")) ||
CRASHDEBUG(2))
error(INFO, "invalid size request: %ld\n", size);
return FALSE;
}
/*
* We extract xen_dumpcore_p2m.gmfn and copy them into the
* buffer. So, we need temporary p2m_buf whose size is
* (size * (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong)))
* to put xen_dumpcore_p2m structures read from xd->xfd.
*/
new_p2m_buf_size = size * (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong));
if (p2m_buf_size != new_p2m_buf_size) {
p2m_buf_size = new_p2m_buf_size;
new_p2m_buf = realloc(p2m_buf, p2m_buf_size);
if (new_p2m_buf == NULL) {
free(p2m_buf);
error(FATAL, "cannot realloc p2m buffer\n");
}
p2m_buf = new_p2m_buf;
}
offset = addr * (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong));
offset += xd->xc_core.header.xch_index_offset;
if (lseek(xd->xfd, offset, SEEK_SET) == -1)
error(FATAL,
"cannot lseek to xch_index_offset offset 0x%lx\n", offset);
if (read(xd->xfd, (void*)p2m_buf, p2m_buf_size) != p2m_buf_size)
error(FATAL,
"cannot read from xch_index_offset offset 0x%lx\n", offset);
for (i = 0; i < size / sizeof(ulong); i++)
*((ulong *)buffer + i) = p2m_buf[i].gmfn;
return TRUE;
}
static ulong
__xen_pvops_m2p_domU(ulonglong machine, ulong mfn)
{
ulong c, end, i, mapping, p, pfn, start;
/*
* xch_nr_pages is the number of pages of p2m mapping. It is composed
* of struct xen_dumpcore_p2m. The stuff we want to copy into the mapping
* page is mfn whose type is unsigned long.
* So actual number of p2m pages should be:
*
* xch_nr_pages / (sizeof(struct xen_dumpcore_p2m) / sizeof(ulong))
*/
for (p = 0;
p < xd->xc_core.header.xch_nr_pages /
(sizeof(struct xen_dumpcore_p2m) / sizeof(ulong));
++p) {
mapping = p * PAGESIZE();
if (mapping != kt->last_mapping_read) {
if (!read_xc_p2m(mapping, (void *)kt->m2p_page, PAGESIZE()))
error(FATAL, "cannot read the last mapping page\n");
kt->last_mapping_read = mapping;
}
kt->p2m_pages_searched++;
if (search_mapping_page(mfn, &i, &start, &end)) {
pfn = p * XEN_PFNS_PER_PAGE + i;
c = kt->p2m_cache_index;
if (CRASHDEBUG (1))
console("mfn: %lx (%llx) i: %ld pfn: %lx (%llx)\n",
mfn, machine, i, pfn, XEN_PFN_TO_PSEUDO(pfn));
kt->p2m_mapping_cache[c].start = start;
kt->p2m_mapping_cache[c].end = end;
kt->p2m_mapping_cache[c].mapping = mapping;
kt->p2m_mapping_cache[c].pfn = p * XEN_PFNS_PER_PAGE;
kt->p2m_cache_index = (c+1) % P2M_MAPPING_CACHE;
return pfn;
}
}
return XEN_MFN_NOT_FOUND;
}
/*
* Search for an mfn in the current mapping page, and if found,
* determine the range of contiguous mfns that it's contained

View File

@ -19,7 +19,7 @@
#include "xendump.h"
static struct xendump_data xendump_data = { 0 };
static struct xendump_data *xd = &xendump_data;
struct xendump_data *xd = &xendump_data;
static int xc_save_verify(char *);
static int xc_core_verify(char *, char *);

View File

@ -192,3 +192,5 @@ struct xen_dumpcore_p2m {
uint64_t pfn;
uint64_t gmfn;
};
extern struct xendump_data *xd;