Below is code I have annotated; I hope it is useful to everyone. Please note that the code is covered by the GPL licence when you use it.
Since my understanding is limited, the annotations may well contain mistakes; questions and corrections are welcome at sirouni@yahoo.com.cn, or ask on the Xen mailing list directly.
If I find the time I will write these notes up as a short article.
I originally wanted to post the whole of my annotated shadow.c, but because of the CU blog's size limit it has to be serialised. Thanks for your interest.
/**************************************************************************/
/* Entry points into the shadow code */
/* The entry points are:
* sh_page_fault()
* sh_invlpg()
*/
/* Called from pagefault handler in Xen, and from the HVM trap handlers
* for pagefaults. Returns 1 if this fault was an artefact of the
* shadow code (and the guest should retry) or 0 if it is not (and the
* fault should be handled elsewhere or passed to the guest). */
/* Main tasks:
* 1) Walk the levels of the guest page table.
* 2) Decide from error_code whether this #PF should be handled by the shadow code.
* 3) Emulate the faulting instruction where necessary.
*/
/*
* [ fix me ] look at the MMIO path when time permits
*/
static int sh_page_fault(struct vcpu *v,
unsigned long va,
struct cpu_user_regs *regs)
{
struct domain *d = v->domain;
walk_t gw;
u32 accumulated_gflags;
gfn_t gfn;
mfn_t gmfn, sl1mfn=_mfn(0);
shadow_l1e_t sl1e, *ptr_sl1e;
paddr_t gpa;
struct sh_emulate_ctxt emul_ctxt;
struct x86_emulate_ops *emul_ops;
int r, mmio;
fetch_type_t ft = 0;
SHADOW_PRINTK("d:v=%u:%u va=%#lx err=%u\n",
v->domain->domain_id, v->vcpu_id, va, regs->error_code);
perfc_incr(shadow_fault);
//
// XXX: Need to think about eventually mapping superpages directly in the
// shadow (when possible), as opposed to splintering them into a
// bunch of 4K maps.
//
/* A fast path for 64-bit Xen/PAE, implemented by planting reserved bits in shadow PTEs, so these faults arrive with PFEC_reserved_bit set in the error code. */
#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH) && SHADOW_PAGING_LEVELS > 2
if ( (regs->error_code & PFEC_reserved_bit) )
{
/* The only reasons for reserved bits to be set in shadow entries
* are the two "magic" shadow_l1e entries. */
if ( likely((__copy_from_user(&sl1e,
(sh_linear_l1_table(v)
+ shadow_l1_linear_offset(va)),
sizeof(sl1e)) == 0)
&& sh_l1e_is_magic(sl1e)) )
{
if ( sh_l1e_is_gnp(sl1e) )
{
if ( likely(!is_hvm_domain(d) ||
paging_vcpu_mode_translate(v)) )
{ /* PV translated */
/* Not-present in a guest PT: pass to the guest as
* a not-present fault (by flipping two bits). */
ASSERT(regs->error_code & PFEC_page_present);
regs->error_code ^= (PFEC_reserved_bit|PFEC_page_present);
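/* error_code is known to have both PFEC_reserved_bit and PFEC_page_present
* set (see the ASSERT above), so the XOR clears both and the guest sees an
* ordinary not-present fault. */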
reset_early_unshadow(v);
perfc_incr(shadow_fault_fast_gnp);
SHADOW_PRINTK("fast path not-present\n");
return 0; /* not present in the guest page table: the fault is injected into the guest */
}
else
{
/* Not-present in the P2M: MMIO */ /* [fix me??] why? */
gpa = va;
}
}
else
{
/* Magic MMIO marker: extract gfn for MMIO address */
ASSERT(sh_l1e_is_mmio(sl1e));
gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
<< PAGE_SHIFT)
| (va & ~PAGE_MASK);
}
perfc_incr(shadow_fault_fast_mmio);
SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
reset_early_unshadow(v);
handle_mmio(gpa);
return EXCRET_fault_fixed;
}
else
{
/* This should be exceptionally rare: another vcpu has fixed
* the tables between the fault and our reading the l1e.
* Retry and let the hardware give us the right fault next time. */
perfc_incr(shadow_fault_fast_fail);
SHADOW_PRINTK("fast path false alarm!\n");
return EXCRET_fault_fixed;
}
}
#endif /* SHOPT_FAST_FAULT_PATH */
/* Detect if this page fault happened while we were already in Xen
* doing a shadow operation. If that happens, the only thing we can
* do is let Xen's normal fault handlers try to fix it. In any case,
* a diagnostic trace of the fault will be more useful than
* a BUG() when we try to take the lock again. */
if ( unlikely(shadow_locked_by_me(d)) )
{
SHADOW_ERROR("Recursive shadow fault: lock was taken by %s\n",
d->arch.paging.shadow.locker_function);
return 0;
}
shadow_lock(d);
shadow_audit_tables(v);
if ( guest_walk_tables(v, va, &gw, 1) != 0 ) /* walk the guest page-table levels; the guest tables visited are made read-only */
{
SHADOW_PRINTK("malformed guest pagetable!");
print_gw(&gw);
}
/* Not redundant with the shadow_audit_tables() call above: note the different compile-time switches that control them. */
sh_audit_gw(v, &gw);
// First check whether the #PF was caused by the guest itself.
// We do not look at the gw->l1e, as that will not exist for superpages.
// Instead, we use the gw->eff_l1e...
//
// We need not check all the levels of the guest page table entries for
// present vs not-present, as the eff_l1e will always be not present if
// one of the higher level entries is not present.
//
// (guest_walk_tables() initialises gw with memset(..., 0), so if a
// higher-level entry is not present, eff_l1e is left not-present too.)
if ( unlikely(!(guest_l1e_get_flags(gw.eff_l1e) & _PAGE_PRESENT)) )
{ /* The guest l2e/l1e is marked NOT PRESENT.
* As guest_walk_tables() builds gw, a not-present l2e leaves gw.eff_l1e = 0,
* so gw.eff_l1e & _PAGE_PRESENT is 0 as well.
*/
if ( is_hvm_domain(d) && !paging_vcpu_mode_translate(v) ) /* an HVM domain that has not yet enabled paging: the P2M table is used, and the entry there is marked not present */
{
/* Not present in p2m map, means this is mmio */
gpa = va;
goto mmio;
}
perfc_incr(shadow_fault_bail_not_present);
goto not_a_shadow_fault; /* the fault was caused by a not-present guest page-table entry */
}
// All levels of the guest page table are now known to be present.
accumulated_gflags = accumulate_guest_flags(v, &gw);
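// accumulate_guest_flags() ANDs the permission bits of all guest levels
// together (accumulating NX with inverted polarity), so an access right
// survives only if every level grants it.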
// Check for attempts to access supervisor-only pages from user mode,
// i.e. ring 3. Such errors are not caused or dealt with by the shadow
// code.
//
if ( (regs->error_code & PFEC_user_mode) &&
!(accumulated_gflags & _PAGE_USER) )
{
/* illegal user-mode access to supervisor-only page */
perfc_incr(shadow_fault_bail_user_supervisor);
goto not_a_shadow_fault;
}
// Was it a write fault?
ft = ((regs->error_code & PFEC_write_access)
? ft_demand_write : ft_demand_read);
if ( ft == ft_demand_write )
{
if ( unlikely(!(accumulated_gflags & _PAGE_RW)) )
{ /* the guest attempted to write to a read-only page */
perfc_incr(shadow_fault_bail_ro_mapping);
goto not_a_shadow_fault;
}
}
else // must have been either an insn fetch or read fault
{
// Check for NX bit violations: attempts to execute code that is
// marked "do not execute". Such errors are not caused or dealt with
// by the shadow code.
//
if ( regs->error_code & PFEC_insn_fetch )
{
if ( accumulated_gflags & _PAGE_NX_BIT )
{
/* NX prevented this code fetch */
perfc_incr(shadow_fault_bail_nx);
goto not_a_shadow_fault;
}
}
}
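/* Summary of the triage so far: a not-present guest entry, a user-mode
* access to a supervisor-only page, a write to a read-only mapping, and an
* instruction fetch from an NX page are all genuine guest faults and are
* bounced back; everything else is the shadow code's to fix. */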
// Reaching this point, the #PF is certainly one the shadow code must handle.
/* A gfn is merely an abstraction introduced by address-space virtualisation. */
/* What mfn is the guest trying to access? */
gfn = guest_l1e_get_gfn(gw.eff_l1e);
gmfn = vcpu_gfn_to_mfn(v, gfn); /* resolve the mfn through the P2M table */
mmio = (is_hvm_domain(d)
&& paging_vcpu_mode_translate(v)
&& mmio_space(gfn_to_paddr(gfn)));
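/* Treated as MMIO only when the domain runs on a translated p2m and the
* guest-physical address is not backed by RAM in that map. */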
if ( !mmio && !mfn_valid(gmfn) )
{ /* invalid gmfn */
perfc_incr(shadow_fault_bail_bad_gfn);
SHADOW_PRINTK("BAD gfn=%"SH_PRI_gfn" gmfn=%"PRI_mfn"\n",
gfn_x(gfn), mfn_x(gmfn));
goto not_a_shadow_fault;
}
/* Make sure there is enough free shadow memory to build a chain of
* shadow tables: one SHADOW_MAX_ORDER chunk will always be enough
* to allocate all we need. (We never allocate a top-level shadow
* on this path, only a 32b l1, pae l2+1 or 64b l3+2+1) */
/* (A top-level shadow is never allocated on this path; that happens when
* the guest loads cr3 or changes paging mode.) */
shadow_prealloc(d, SHADOW_MAX_ORDER);
/* Acquire the shadow. This must happen before we figure out the rights
* for the shadow entry, since we might promote a page here. */
/* Get or create the corresponding shadow entry.
* Note: translation always finishes by taking the physical base from an l1e
* and adding the page offset, so we must end up with an sl1e and take its
* address.
*/
ptr_sl1e = shadow_get_and_create_l1e(v, &gw, &sl1mfn, ft);
if ( unlikely(ptr_sl1e == NULL) )
{
/* Couldn't get the sl1e! Since we know the guest entries
* are OK, this can only have been caused by a failed
* shadow_set_l*e(), which will have crashed the guest.
* Get out of the fault handler immediately. */
ASSERT(d->is_shutting_down);
unmap_walk(v, &gw);
shadow_unlock(d);
return 0;
}
/* Calculate the shadow entry and write it */
l1e_propagate_from_guest(v, (gw.l1e) ? gw.l1e : &gw.eff_l1e, gw.l1mfn,
gmfn, &sl1e, ft, mmio);
r = shadow_set_l1e(v, ptr_sl1e, sl1e, sl1mfn);
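/* shadow_set_l1e() installs the new entry and returns a SHADOW_SET_* result
* mask (e.g. whether anything changed or a TLB flush is needed). */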
#if SHADOW_OPTIMIZATIONS & SHOPT_PREFETCH
/* Prefetch some more shadow entries */
sh_prefetch(v, &gw, ptr_sl1e, sl1mfn);
#endif
/* Need to emulate accesses to page tables */
if ( sh_mfn_is_a_page_table(gmfn) )
{
if ( ft == ft_demand_write )
{
perfc_incr(shadow_fault_emulate_write);
goto emulate;
}
else if ( shadow_mode_trap_reads(d) && ft == ft_demand_read )
{ /* Xen does not yet support emulating reads of guest page tables */
perfc_incr(shadow_fault_emulate_read);
goto emulate;
}
}
if ( mmio )
{
gpa = guest_walk_to_gpa(&gw);
goto mmio;
}
perfc_incr(shadow_fault_fixed);
d->arch.paging.shadow.fault_count++;
reset_early_unshadow(v);
done:
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
SHADOW_PRINTK("fixed\n");
shadow_audit_tables(v);
shadow_unlock(d);
return EXCRET_fault_fixed; /* the fault has been fixed; the guest replays the instruction */
emulate: /* emulate the operation on the guest page table */
if ( !shadow_mode_refcounts(d) || !guest_mode(regs) )
goto not_a_shadow_fault;
if ( is_hvm_domain(d) )
{
/*
* If we are in the middle of injecting an exception or interrupt then
* we should not emulate: it is not the instruction at %eip that caused
* the fault. Furthermore it is almost certainly the case the handler
* stack is currently considered to be a page table, so we should
* unshadow the faulting page before exiting.
*/
if ( unlikely(hvm_event_injection_faulted(v)) )
{
gdprintk(XENLOG_DEBUG, "write to pagetable during event "
"injection: cr2=%#lx, mfn=%#lx\n",
va, mfn_x(gmfn));
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
goto done;
}
/* save the guest registers for use by the emulation below */
hvm_store_cpu_guest_regs(v, regs, NULL);
}
SHADOW_PRINTK("emulate: eip=%#lx esp=%#lx\n",
(unsigned long)regs->eip, (unsigned long)regs->esp);
emul_ops = shadow_init_emulation(&emul_ctxt, regs);
/*
* We do not emulate user writes. Instead we use them as a hint that the
* page is no longer a page table. This behaviour differs from native, but
* it seems very unlikely that any OS grants user access to page tables.
*/
r = X86EMUL_UNHANDLEABLE;
if ( !(regs->error_code & PFEC_user_mode) )
/* [fix me??] not yet examined */
r = x86_emulate(&emul_ctxt.ctxt, emul_ops); /* emulate the instruction held in the emulation context's buffer */
/*
* NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
* would be a good unshadow hint. If we *do* decide to unshadow-on-fault
* then it must be 'failable': we cannot require the unshadow to succeed.
*/
if ( r == X86EMUL_UNHANDLEABLE )
{
SHADOW_PRINTK("emulator failure, unshadowing mfn %#lx\n",
mfn_x(gmfn));
perfc_incr(shadow_fault_emulate_failed);
/* If this is actually a page table, then we have a bug, and need
* to support more operations in the emulator. More likely,
* though, this is a hint that this page should not be shadowed. */
sh_remove_shadows(v, gmfn, 0 /* thorough */, 1 /* must succeed */);
}
/* Emulator has changed the user registers: write back */
if ( is_hvm_domain(d) )
hvm_load_cpu_guest_regs(v, regs);
goto done;
mmio:
if ( !guest_mode(regs) )
goto not_a_shadow_fault;
perfc_incr(shadow_fault_mmio);
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
shadow_audit_tables(v);
reset_early_unshadow(v);
shadow_unlock(d);
handle_mmio(gpa);
return EXCRET_fault_fixed;
/* a #PF not caused by the shadow code */
not_a_shadow_fault:
sh_audit_gw(v, &gw);
unmap_walk(v, &gw);
SHADOW_PRINTK("not a shadow fault\n");
shadow_audit_tables(v);
reset_early_unshadow(v);
shadow_unlock(d);
return 0;
}
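To make the reserved-bit fast path at the top of the handler more concrete, here is a minimal, compilable sketch of the idea. The bit layout and helper names (PTE_RESERVED, MAGIC_MMIO_FLAG, sl1e_gnp(), sl1e_mmio() and so on) are hypothetical stand-ins for Xen's real sh_l1e_* helpers, which pack the bits differently; the sketch only shows how planting a reserved bit lets the #PF handler recognise its own "magic" entries from PFEC_reserved_bit and decode them without walking the guest tables.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define PAGE_SHIFT      12
#define PTE_PRESENT     (1ULL << 0)
#define PTE_RESERVED    (1ULL << 51)  /* a PTE bit the CPU requires to be zero */
#define MAGIC_MMIO_FLAG (1ULL << 1)   /* tells MMIO markers and gnp markers apart */

typedef uint64_t sl1e_t;

/* Any entry with the reserved bit set faults with PFEC_reserved_bit set,
 * so the handler knows the shadow code planted it. */
static int sl1e_is_magic(sl1e_t e) { return (e & PTE_RESERVED) != 0; }

/* "Guest not present" marker: carries no payload. */
static sl1e_t sl1e_gnp(void) { return PTE_RESERVED | PTE_PRESENT; }
static int sl1e_is_gnp(sl1e_t e) { return sl1e_is_magic(e) && !(e & MAGIC_MMIO_FLAG); }

/* "MMIO" marker: stash the guest frame number in the address bits. */
static sl1e_t sl1e_mmio(uint64_t gfn)
{ return PTE_RESERVED | PTE_PRESENT | MAGIC_MMIO_FLAG | (gfn << PAGE_SHIFT); }
static uint64_t sl1e_mmio_gfn(sl1e_t e) { return (e >> PAGE_SHIFT) & 0xfffffffffULL; }

int main(void)
{
    uint64_t va = 0xfee003f8;        /* faulting virtual address */
    sl1e_t e = sl1e_mmio(0xfee00);   /* marker planted by an earlier fault */

    assert(sl1e_is_magic(e) && !sl1e_is_gnp(e));
    assert(sl1e_is_gnp(sl1e_gnp()));

    /* Rebuild the guest-physical address the way sh_page_fault() does:
     * gpa = (gfn << PAGE_SHIFT) | (va & ~PAGE_MASK). */
    uint64_t gpa = (sl1e_mmio_gfn(e) << PAGE_SHIFT)
                   | (va & ((1ULL << PAGE_SHIFT) - 1));
    printf("fast-path mmio gpa = %#llx\n", (unsigned long long)gpa);
    return 0;
}

A gnp marker carries no payload, while an MMIO marker stashes the gfn in the address bits; on the next fault the handler recombines that gfn with the page offset of the faulting va, exactly as in the gpa computation near the top of sh_page_fault().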
This article comes from the ChinaUnix blog; for the original post see: http://blog.chinaunix.net/u/7949/showart_483353.html