NMV Exercice 4

Adrien Bourmault 2023-10-16 02:11:41 +02:00
parent 716e4cd880
commit 17744e00af
No known key found for this signature in database
GPG Key ID: 2974E1D5F25DFCC8
7 changed files with 523 additions and 25 deletions

View File

@@ -20,7 +20,37 @@ To know whether it is terminal, TODO.
### Q3
Done.
```C
void print_pgt(paddr_t pml, uint8_t lvl)
{
	paddr_t *cur = (paddr_t *)(pml & PGT_ADDR_MASK);

	if (lvl == 0)
		return;
	if (lvl == 4)
		printk("[print_pgt]\n");
	printk("\tPML%d @ 0x%lx exists\n",
	       lvl,
	       (uint64_t)cur);
	// Walking the 512 entries of this level, skipping the empty ones
	for (int i = 0; i < 512; i++, cur++) {
		if (!*cur)
			continue;
		printk("\t\tPML%d[%lx] -> 0x%lx (FLAGS ",
		       lvl,
		       (uint64_t)cur,
		       (uint64_t)(*cur) & PGT_ADDR_MASK);
		if (*cur & PGT_USER_MASK)
			printk("U");
		if (*cur & PGT_WRITABLE_MASK)
			printk("W");
		if (*cur & PGT_VALID_MASK)
			printk("P");
		printk(")\n");
		// Recursing into the next level if the entry points somewhere
		if (*cur & PGT_ADDR_MASK)
			print_pgt(*cur, lvl - 1);
	}
}
```
## Exercise 2
@@ -34,7 +64,99 @@ $$index = (vaddr \& (\mathtt{0x1FF000} \ll (9 \times (lvl-1)))) \gg (12 + (9 \times (lvl-1)))$$
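As a sanity check, here is a standalone sketch applying the same formula to a sample address; the `pml_index` helper, the `main` driver, and the sample value are ours, written for illustration, not part of the kernel:
```C
#include <stdio.h>
#include <stdint.h>

/* Same index formula as above, usable from userspace for testing */
static uint64_t pml_index(uint64_t vaddr, int lvl)
{
	return (vaddr & (0x1FF000ul << (9 * (lvl - 1)))) >> (12 + 9 * (lvl - 1));
}

int main(void)
{
	uint64_t vaddr = 0x2000000030ul; /* sample user address */

	/* For 0x2000000030 we expect PML4=0, PML3=128, PML2=0, PML1=0 */
	for (int lvl = 4; lvl >= 1; lvl--)
		printf("PML%d index = %lu\n", lvl, pml_index(vaddr, lvl));
	return 0;
}
```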
### Q2
Done.
```C
void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr)
{
	uint64_t *cur_pml = NULL;
	uint64_t *cr3_pml = NULL;
	uint64_t n = 4;

	// We have to create a new page table
	if (!ctx->pgt) {
		// Creating PML4
		ctx->pgt = alloc_page();
		cur_pml = (uint64_t *)ctx->pgt;
		// Creating PML3
		cur_pml[PGT_PML_INDEX(4, 0x0)] = alloc_page();
		cur_pml[PGT_PML_INDEX(4, 0x0)] |= PGT_VALID_MASK
			| PGT_WRITABLE_MASK
			| PGT_USER_MASK;
		// Getting PML3 entry in PML4 from CR3
		cur_pml = (uint64_t *)(cur_pml[PGT_PML_INDEX(4, 0x0)]
				       & PGT_ADDR_MASK);
		cr3_pml = (uint64_t *)(store_cr3()
				       & PGT_ADDR_MASK);
		cr3_pml = (uint64_t *)(cr3_pml[PGT_PML_INDEX(4, 0x0)]
				       & PGT_ADDR_MASK);
		// Duplicating PML2 entry in PML3 (kernel mappings)
		cur_pml[PGT_PML_INDEX(3, 0x0)] =
			cr3_pml[PGT_PML_INDEX(3, 0x0)];
	}

	cur_pml = (uint64_t *)ctx->pgt;

	/* printk("[map_page] trying to map 0x%lx -> 0x%lx\n", */
	/*        vaddr, paddr); */

	// Walking each level down to the last one
	while (n > 1) {
		// Checking entry validity (VALID flag and non-zero address)
		if (cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_VALID_MASK
		    && cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_ADDR_MASK) {
			/* printk("[map_page] PML%d entry 0x%lx @ 0x%lx is valid (=0x%lx)\n", */
			/*        n, */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        &cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
			/*        & PGT_VALID_MASK); */
		} else {
			/* printk("[map_page] PML%d entry 0x%lx @ 0x%lx is NOT valid (=0x%lx)\n", */
			/*        n, */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        &cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
			/*        & PGT_VALID_MASK); */
			cur_pml[PGT_PML_INDEX(n, vaddr)] = alloc_page();
			/* printk("[map_page] allocated page at 0x%lx\n", */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)]); */
			cur_pml[PGT_PML_INDEX(n, vaddr)] |= PGT_VALID_MASK
				| PGT_WRITABLE_MASK
				| PGT_USER_MASK;
		}
		// Following the entry down to the next level
		cur_pml = (uint64_t *)(cur_pml[PGT_PML_INDEX(n, vaddr)]
				       & PGT_ADDR_MASK);
		n--;
	}

	// Mapping: we have reached n == 1
	// Checking validity and addr != 0
	if (cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_VALID_MASK) {
		printk("[map_page] ERR : vaddr 0x%lx WAS ALREADY mapped to 0x%lx !!!\n",
		       vaddr,
		       cur_pml[PGT_PML_INDEX(n, vaddr)]
		       & PGT_ADDR_MASK);
		return;
	} else {
		cur_pml[PGT_PML_INDEX(n, vaddr)] = paddr | PGT_VALID_MASK
			| PGT_WRITABLE_MASK
			| PGT_USER_MASK;
		/* printk("[map_page] vaddr 0x%lx mapped to 0x%lx\n", */
		/*        vaddr, */
		/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
		/*        & PGT_ADDR_MASK); */
		return;
	}
}
```
## Exercise 3
@@ -94,3 +216,60 @@ We therefore have in common:
### Q3
The payload starts at address 0x2000000000. Its first address is `0x2000000000` and its end is `0x2000000000 + ctx->load_end_paddr - ctx->load_paddr`, which marks the start of the bss; the bss itself ends at `ctx->bss_end_vaddr`.
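For instance, with hypothetical values (chosen only for illustration, not taken from the subject), the boundaries work out as follows:
```C
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical task layout, for illustration only */
	uint64_t load_vaddr     = 0x2000000000ul; /* payload start (vaddr) */
	uint64_t load_paddr     = 0x10000ul;      /* payload start (paddr) */
	uint64_t load_end_paddr = 0x13000ul;      /* payload end (paddr)   */
	uint64_t bss_end_vaddr  = 0x2000005000ul; /* end of the bss        */

	/* End of the payload == start of the bss */
	uint64_t bss_start = load_vaddr + load_end_paddr - load_paddr;

	printf("payload: [0x%lx, 0x%lx)\n", load_vaddr, bss_start);
	printf("bss:     [0x%lx, 0x%lx)\n", bss_start, bss_end_vaddr);
	return 0;
}
```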
### Q4
```C
void load_task(struct task *ctx)
{
	vaddr_t cur_vaddr = ctx->load_vaddr;
	vaddr_t end_vaddr = ctx->load_vaddr
		+ ctx->load_end_paddr
		- ctx->load_paddr;

	// Allocating payload pages, one 4 KiB page per iteration
	while (cur_vaddr < end_vaddr) {
		map_page(ctx, cur_vaddr, alloc_page());
		cur_vaddr += PGT_PAGE_SIZE;
	}
	//printk("\tfinished map payload\n");
	// Allocating bss pages
	while (cur_vaddr < ctx->bss_end_vaddr) {
		map_page(ctx, cur_vaddr, alloc_page());
		cur_vaddr += PGT_PAGE_SIZE;
	}
	// Mapping one stack page just below the initial rsp
	map_page(ctx, (vaddr_t)ctx->context.rsp - PGT_PAGE_SIZE, alloc_page());
	printk("[load_task] task from @0x%lx loaded\n", ctx->load_paddr);
}
```
### Q5
```C
void set_task(struct task *ctx)
{
	uint64_t *cur_vaddr = (uint64_t *)(ctx->load_vaddr
					   + ctx->load_end_paddr
					   - ctx->load_paddr);

	load_cr3(ctx->pgt);
	printk("[set_task] new PML4 loaded @ 0x%lx\n", ctx->pgt);
	// Zeroing the bss, 8 bytes at a time
	while ((vaddr_t)cur_vaddr < ctx->bss_end_vaddr) {
		*cur_vaddr = 0;
		cur_vaddr++;
	}
	printk("[set_task] task set @ 0x%lx (PML4 at 0x%lx)\n",
	       ctx->load_paddr, ctx->pgt);
}
```

View File

@@ -20,7 +20,37 @@ To know whether it is terminal, TODO.
### Q3
Done.
```C
void print_pgt(paddr_t pml, uint8_t lvl)
{
	paddr_t *cur = (paddr_t *)(pml & PGT_ADDR_MASK);

	if (lvl == 0)
		return;
	if (lvl == 4)
		printk("[print_pgt]\n");
	printk("\tPML%d @ 0x%lx exists\n",
	       lvl,
	       (uint64_t)cur);
	// Walking the 512 entries of this level, skipping the empty ones
	for (int i = 0; i < 512; i++, cur++) {
		if (!*cur)
			continue;
		printk("\t\tPML%d[%lx] -> 0x%lx (FLAGS ",
		       lvl,
		       (uint64_t)cur,
		       (uint64_t)(*cur) & PGT_ADDR_MASK);
		if (*cur & PGT_USER_MASK)
			printk("U");
		if (*cur & PGT_WRITABLE_MASK)
			printk("W");
		if (*cur & PGT_VALID_MASK)
			printk("P");
		printk(")\n");
		// Recursing into the next level if the entry points somewhere
		if (*cur & PGT_ADDR_MASK)
			print_pgt(*cur, lvl - 1);
	}
}
```
## Exercise 2
@@ -34,6 +64,212 @@ $$index = (vaddr \& (\mathtt{0x1FF000} \ll (9 \times (lvl-1)))) \gg (12 + (9 \times (lvl-1)))$$
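Conversely, the four 9-bit indices plus the 12-bit page offset reassemble into the original virtual address; a quick standalone check (the index values are assumed, for illustration only):
```C
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Indices of 0x2000000030, as computed with the formula above */
	uint64_t i4 = 0, i3 = 128, i2 = 0, i1 = 0, off = 0x30;

	/* Rebuild the address: each level selects 9 bits, pages are 4 KiB */
	uint64_t vaddr = (i4 << 39) | (i3 << 30) | (i2 << 21) | (i1 << 12) | off;

	printf("vaddr = 0x%lx\n", vaddr); /* expect 0x2000000030 */
	return 0;
}
```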
### Q2
```C
void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr)
{
	uint64_t *cur_pml = NULL;
	uint64_t *cr3_pml = NULL;
	uint64_t n = 4;

	// We have to create a new page table
	if (!ctx->pgt) {
		// Creating PML4
		ctx->pgt = alloc_page();
		cur_pml = (uint64_t *)ctx->pgt;
		// Creating PML3
		cur_pml[PGT_PML_INDEX(4, 0x0)] = alloc_page();
		cur_pml[PGT_PML_INDEX(4, 0x0)] |= PGT_VALID_MASK
			| PGT_WRITABLE_MASK
			| PGT_USER_MASK;
		// Getting PML3 entry in PML4 from CR3
		cur_pml = (uint64_t *)(cur_pml[PGT_PML_INDEX(4, 0x0)]
				       & PGT_ADDR_MASK);
		cr3_pml = (uint64_t *)(store_cr3()
				       & PGT_ADDR_MASK);
		cr3_pml = (uint64_t *)(cr3_pml[PGT_PML_INDEX(4, 0x0)]
				       & PGT_ADDR_MASK);
		// Duplicating PML2 entry in PML3 (kernel mappings)
		cur_pml[PGT_PML_INDEX(3, 0x0)] =
			cr3_pml[PGT_PML_INDEX(3, 0x0)];
	}

	cur_pml = (uint64_t *)ctx->pgt;

	/* printk("[map_page] trying to map 0x%lx -> 0x%lx\n", */
	/*        vaddr, paddr); */

	// Walking each level down to the last one
	while (n > 1) {
		// Checking entry validity (VALID flag and non-zero address)
		if (cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_VALID_MASK
		    && cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_ADDR_MASK) {
			/* printk("[map_page] PML%d entry 0x%lx @ 0x%lx is valid (=0x%lx)\n", */
			/*        n, */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        &cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
			/*        & PGT_VALID_MASK); */
		} else {
			/* printk("[map_page] PML%d entry 0x%lx @ 0x%lx is NOT valid (=0x%lx)\n", */
			/*        n, */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        &cur_pml[PGT_PML_INDEX(n, vaddr)], */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
			/*        & PGT_VALID_MASK); */
			cur_pml[PGT_PML_INDEX(n, vaddr)] = alloc_page();
			/* printk("[map_page] allocated page at 0x%lx\n", */
			/*        cur_pml[PGT_PML_INDEX(n, vaddr)]); */
			cur_pml[PGT_PML_INDEX(n, vaddr)] |= PGT_VALID_MASK
				| PGT_WRITABLE_MASK
				| PGT_USER_MASK;
		}
		// Following the entry down to the next level
		cur_pml = (uint64_t *)(cur_pml[PGT_PML_INDEX(n, vaddr)]
				       & PGT_ADDR_MASK);
		n--;
	}

	// Mapping: we have reached n == 1
	// Checking validity and addr != 0
	if (cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_VALID_MASK) {
		printk("[map_page] ERR : vaddr 0x%lx WAS ALREADY mapped to 0x%lx !!!\n",
		       vaddr,
		       cur_pml[PGT_PML_INDEX(n, vaddr)]
		       & PGT_ADDR_MASK);
		return;
	} else {
		cur_pml[PGT_PML_INDEX(n, vaddr)] = paddr | PGT_VALID_MASK
			| PGT_WRITABLE_MASK
			| PGT_USER_MASK;
		/* printk("[map_page] vaddr 0x%lx mapped to 0x%lx\n", */
		/*        vaddr, */
		/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
		/*        & PGT_ADDR_MASK); */
		return;
	}
}
```
## Exercise 3
### Q1
Address 0x2000000030 lies in the user stack area, so we can assume the page
fault is triggered by a function call made from user code.
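To see why a mere function call faults there: `call` pushes the return address, i.e. it writes 8 bytes just below `rsp`. The sketch below only computes which page such a write touches, assuming `rsp = 0x2000000038` (an assumed value, for illustration):
```C
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ul

int main(void)
{
	uint64_t rsp = 0x2000000038ul;    /* assumed user rsp */
	uint64_t write_addr = rsp - 8;    /* where call stores the return address */
	uint64_t page = write_addr & ~(PAGE_SIZE - 1);

	/* If this page is not mapped, the MMU raises a page fault at write_addr */
	printf("call writes at 0x%lx (page 0x%lx)\n", write_addr, page);
	return 0;
}
```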
### Q2
```
+----------------------+ 0xffffffffffffffff
| Higher half          |
| (unused)             |
+----------------------+ 0xffff800000000000
| (impossible address) |
+----------------------+ 0x00007fffffffffff
| User                 |
| (text + data + heap) |
+----------------------+ 0x2000000000
| User                 |
| (stack)              |
+----------------------+ 0x40000000
| Kernel               |
| (valloc)             |
+----------------------+ 0x201000
| Kernel               |
| (APIC)               |
+----------------------+ 0x200000
| Kernel               |
| (text + data)        |
+----------------------+ 0x100000
| Kernel               |
| (BIOS + VGA)         |
+----------------------+ 0x0
```
For every task, the shared areas are obviously the kernel ones (in particular
because we must be able to perform system calls and have a working VGA buffer).
We therefore have in common:
```
+----------------------+ 0x40000000
| Kernel               |
| (valloc)             |
+----------------------+ 0x201000
| Kernel               |
| (APIC)               |
+----------------------+ 0x200000
| Kernel               |
| (text + data)        |
+----------------------+ 0x100000
| Kernel               |
| (BIOS + VGA)         |
+----------------------+ 0x0
```
### Q3
The payload starts at address 0x2000000000. Its first address is `0x2000000000` and its end is `0x2000000000 + ctx->load_end_paddr - ctx->load_paddr`, which marks the start of the bss; the bss itself ends at `ctx->bss_end_vaddr`.
### Q4
```C
void load_task(struct task *ctx)
{
	vaddr_t cur_vaddr = ctx->load_vaddr;
	vaddr_t end_vaddr = ctx->load_vaddr
		+ ctx->load_end_paddr
		- ctx->load_paddr;

	// Allocating payload pages, one 4 KiB page per iteration
	while (cur_vaddr < end_vaddr) {
		map_page(ctx, cur_vaddr, alloc_page());
		cur_vaddr += PGT_PAGE_SIZE;
	}
	//printk("\tfinished map payload\n");
	// Allocating bss pages
	while (cur_vaddr < ctx->bss_end_vaddr) {
		map_page(ctx, cur_vaddr, alloc_page());
		cur_vaddr += PGT_PAGE_SIZE;
	}
	// Mapping one stack page just below the initial rsp
	map_page(ctx, (vaddr_t)ctx->context.rsp - PGT_PAGE_SIZE, alloc_page());
	printk("[load_task] task from @0x%lx loaded\n", ctx->load_paddr);
}
```
### Q5
```C
void set_task(struct task *ctx)
{
	uint64_t *cur_vaddr = (uint64_t *)(ctx->load_vaddr
					   + ctx->load_end_paddr
					   - ctx->load_paddr);

	load_cr3(ctx->pgt);
	printk("[set_task] new PML4 loaded @ 0x%lx\n", ctx->pgt);
	// Zeroing the bss, 8 bytes at a time
	while ((vaddr_t)cur_vaddr < ctx->bss_end_vaddr) {
		*cur_vaddr = 0;
		cur_vaddr++;
	}
	printk("[set_task] task set @ 0x%lx (PML4 at 0x%lx)\n",
	       ctx->load_paddr, ctx->pgt);
}
```

View File

@@ -3,7 +3,7 @@ BIN := bin/
AS := gcc
ASFLAGS := -Wall -Wextra -O2 -nostdlib -nodefaultlibs
CC := gcc
CC := gcc -g
CCFLAGS := -Wall -Wextra -O2 -nostdlib -nodefaultlibs -fno-builtin \
-fno-stack-protector -Wno-implicit-fallthrough -mno-sse -mno-mmx
LD := ld
@@ -149,4 +149,17 @@ clean:
	$(Q)rm -rf $(OBJ) $(BIN) 2> /dev/null || true

gdb: all $(BIN)rackdoll.iso
	$(call cmd-print, BOOT $(BIN)rackdoll.iso)
	$(Q)qemu-system-x86_64 -smp 1 -m 4G \
	    -drive file=$(BIN)rackdoll.iso,format=raw -monitor stdio \
	    -no-reboot -no-shutdown \
	    -d cpu_reset,guest_errors,pcall,int -s -S 2> qemu.log &
	$(Q)gdb \
	    -ex "set arch i386:x86-64:intel" \
	    -ex "target remote localhost:1234" \
	    -ex "symbol-file $(BIN)rackdoll.elf" \
	    -ex "break main_multiboot2"

.SECONDARY:

View File

@@ -11,7 +11,6 @@ paddr_t alloc_page(void); /* Allocate a physical page identity mapped */
void free_page(paddr_t addr); /* Release a page allocated with alloc_page() */
void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr);
void load_task(struct task *ctx);

View File

@@ -58,6 +58,17 @@
#define RFLAGS_VIP (1ul << 20)
#define RFLAGS_ID (1ul << 21)
/*
* Memory
*/
#define PGT_VALID_MASK (1 << 0)
#define PGT_WRITABLE_MASK (1 << 1)
#define PGT_USER_MASK (1 << 2)
#define PGT_ADDR_MASK (0xFFFFFFFFFFFFF000)
#define PGT_PML_INDEX(n, vaddr) (((vaddr) & (0x1FF000ul << (9ul * ((n) - 1ul)))) >> (12ul + (9ul * ((n) - 1ul))))
#define PGT_PAGE_SIZE 4096
static inline void load_rsp(uint64_t rsp)
{
@@ -172,13 +183,5 @@ static inline uint64_t rdmsr(uint32_t msr)
	asm volatile ("rdmsr" : "=a" (eax), "=d" (edx) : "c" (msr));
	return (((uint64_t) edx) << 32) | eax;
}
#define PGT_VALID_MASK (1 << 0)
#define PGT_WRITABLE_MASK (1 << 1)
#define PGT_USER_MASK (1 << 2)
#define PGT_ADDR_MASK (0xFFFFFFFFFFFFF000)
#define PGT_PML_INDEX(n, vaddr) (vaddr & (0x1FF000 << (9 * (n-1)))) >> (12 + (9 * (n-1)))
#endif

View File

@@ -82,3 +82,4 @@ void main_multiboot2(void *mb2)
	printk("\nGoodbye!\n"); /* farewell */
	die(); /* the work is done, we can die now... */
}

View File

@@ -104,11 +104,40 @@ void free_page(paddr_t addr)
void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr)
{
	uint64_t *cur_pml = (uint64_t *)ctx->pgt;
	int n = 4;
	uint64_t *cur_pml = NULL;
	uint64_t *cr3_pml = NULL;
	uint64_t n = 4;
	printk("[map_page] trying to map 0x%lx -> 0x%lx\n",
	       vaddr, paddr);
	// We have to create a new page table
	if (!ctx->pgt) {
		// Creating PML4
		ctx->pgt = alloc_page();
		cur_pml = (uint64_t *)ctx->pgt;
		// Creating PML3
		cur_pml[PGT_PML_INDEX(4, 0x0)] = alloc_page();
		cur_pml[PGT_PML_INDEX(4, 0x0)] |= PGT_VALID_MASK
			| PGT_WRITABLE_MASK
			| PGT_USER_MASK;
		// Getting PML3 entry in PML4 from CR3
		cur_pml = (uint64_t *)(cur_pml[PGT_PML_INDEX(4, 0x0)]
				       & PGT_ADDR_MASK);
		cr3_pml = (uint64_t *)(store_cr3()
				       & PGT_ADDR_MASK);
		cr3_pml = (uint64_t *)(cr3_pml[PGT_PML_INDEX(4, 0x0)]
				       & PGT_ADDR_MASK);
		// Duplicating PML2 entry in PML3 (kernel mappings)
		cur_pml[PGT_PML_INDEX(3, 0x0)] =
			cr3_pml[PGT_PML_INDEX(3, 0x0)];
	}
	cur_pml = (uint64_t *)ctx->pgt;
	/* printk("[map_page] trying to map 0x%lx -> 0x%lx\n", */
	/*        vaddr, paddr); */
	// Walking each level down to the last one
	while (n > 1) {
@@ -132,8 +161,8 @@ void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr)
		cur_pml[PGT_PML_INDEX(n, vaddr)] = alloc_page();
		printk("[map_page] Allocated page at 0x%lx\n",
		       cur_pml[PGT_PML_INDEX(n, vaddr)]);
		/* printk("[map_page] allocated page at 0x%lx\n", */
		/*        cur_pml[PGT_PML_INDEX(n, vaddr)]); */
		cur_pml[PGT_PML_INDEX(n, vaddr)] |= PGT_VALID_MASK
			| PGT_WRITABLE_MASK
@@ -147,7 +176,7 @@ void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr)
	// Mapping: we have reached n == 1
	// Checking validity and addr != 0
	if (cur_pml[PGT_PML_INDEX(n, vaddr)] & PGT_VALID_MASK) {
		printk("[map_page] vaddr 0x%lx WAS ALREADY mapped to 0x%lx !!!\n",
		printk("[map_page] ERR : vaddr 0x%lx WAS ALREADY mapped to 0x%lx !!!\n",
		       vaddr,
		       cur_pml[PGT_PML_INDEX(n, vaddr)]
		       & PGT_ADDR_MASK);
@@ -157,20 +186,58 @@ void map_page(struct task *ctx, vaddr_t vaddr, paddr_t paddr)
			| PGT_WRITABLE_MASK
			| PGT_USER_MASK;
		printk("[map_page] vaddr 0x%lx mapped to 0x%lx\n",
		       vaddr,
		       cur_pml[PGT_PML_INDEX(n, vaddr)]
		       & PGT_ADDR_MASK);
		/* printk("[map_page] vaddr 0x%lx mapped to 0x%lx\n", */
		/*        vaddr, */
		/*        cur_pml[PGT_PML_INDEX(n, vaddr)] */
		/*        & PGT_ADDR_MASK); */
		return;
	}
}
void load_task(struct task *ctx)
{
	vaddr_t cur_vaddr = ctx->load_vaddr;
	vaddr_t end_vaddr = ctx->load_vaddr
		+ ctx->load_end_paddr
		- ctx->load_paddr;

	// Allocating payload pages, one 4 KiB page per iteration
	while (cur_vaddr < end_vaddr) {
		map_page(ctx, cur_vaddr, alloc_page());
		cur_vaddr += PGT_PAGE_SIZE;
	}
	//printk("\tfinished map payload\n");
	// Allocating bss pages
	while (cur_vaddr < ctx->bss_end_vaddr) {
		map_page(ctx, cur_vaddr, alloc_page());
		cur_vaddr += PGT_PAGE_SIZE;
	}
	// Mapping one stack page just below the initial rsp
	map_page(ctx, (vaddr_t)ctx->context.rsp - PGT_PAGE_SIZE, alloc_page());
	printk("[load_task] task from @0x%lx loaded\n", ctx->load_paddr);
}
void set_task(struct task *ctx)
{
	uint64_t *cur_vaddr = (uint64_t *)(ctx->load_vaddr
					   + ctx->load_end_paddr
					   - ctx->load_paddr);

	load_cr3(ctx->pgt);
	printk("[set_task] new PML4 loaded @ 0x%lx\n", ctx->pgt);
	// Zeroing the bss, 8 bytes at a time
	while ((vaddr_t)cur_vaddr < ctx->bss_end_vaddr) {
		*cur_vaddr = 0;
		cur_vaddr++;
	}
	printk("[set_task] task set @ 0x%lx (PML4 at 0x%lx)\n",
	       ctx->load_paddr, ctx->pgt);
}
void mmap(struct task *ctx, vaddr_t vaddr)