// Kernel-mode swap API
#include "types.h"
#include "defs.h"
#include "param.h"
#include "memlayout.h"
#include "mmu.h"
#include "x86.h"
#include "proc.h"
#include "spinlock.h"
#include "swap.h"
#include "stat.h"
#include "fs.h"
#include "sleeplock.h"
#include "file.h"
// In full Linux these are variables defined in include/linux/mmzone.h;
// for the sake of simplicity, we define them as constants here instead.
#define PAGES_LOW 100
#define PAGES_HIGH 1000
// The swap type here is always 0, but more swap areas could be added later
struct swap_info_struct swap_info[1];
struct spinlock swaplock;
// Forward declarations
swp_entry_t get_swap_page(void);
int scan_swap_map(struct swap_info_struct*);
void lru_list_initialize();
void lru_cache_del(pte_t, uint);
#define SWAPFILE_CLUSTER 16
//#define SWAPFILE_CLUSTER 3
void kswapinit()
{
unsigned int swapmap_bytes_needed = SWAPFILE_PAGES * sizeof(unsigned short);
cprintf("kernel: Initializing swap info\n");
memset(&swap_info[0],0,sizeof(struct swap_info_struct));
  swap_info[0].pages = SWAPFILE_PAGES;
  swap_info[0].max = SWAPFILE_PAGES - 1;  // max is the highest usable slot offset
  swap_info[0].swap_map_pages = (swapmap_bytes_needed + PGSIZE - 1) / PGSIZE;  // round up
swap_info[0].flags = SWP_WRITEOK;
swap_info[0].highest_bit = SWAPFILE_PAGES - 1;
swap_info[0].cluster_nr = SWAPFILE_CLUSTER;
initlock(&swaplock,"swaplock");
initlock(&swap_info[0].sdev_lock,"sdev_lock");
cprintf("kernel: swapmap bytes needed: %d\n",swapmap_bytes_needed);
cprintf("kernel: swapmap pages needed: %d\n",swap_info[0].swap_map_pages);
for (int x = 0; x < swap_info[0].swap_map_pages; x++)
{
    // IMPORTANT NOTE:
    // We assume that kalloc() returns pages in sequence from high to low addresses. This method
    // runs early enough in xv6 initialization that this holds every time, so we can use a
    // straightforward implementation.
    //
    // Example scenario:
    // 4 pages needed for the swap map in total, so 4 passes through the loop:
    // 1st page allocated at 0x804FF000
    // 2nd page allocated at 0x804FE000
    // 3rd page allocated at 0x804FD000
    // 4th page allocated at 0x804FC000. Swap map pointer set to 0x804FC000 since this is the last (lowest) page.
    //
    // This gives a swap map allocated over the contiguous range 0x804FC000 to 0x804FFFFF, for a total of 16k
    char *new_kalloc_page = kalloc();
    if (new_kalloc_page == 0)
      panic("kswapinit: out of memory");
    memset(new_kalloc_page,0,PGSIZE);
cprintf("kernel: Allocating page for swap map at address 0x%p\n",new_kalloc_page);
if (x == swap_info[0].swap_map_pages - 1)
{
      // The final (lowest-address) allocation becomes the base pointer of the contiguous map
swap_info[0].swap_map = (unsigned short*)new_kalloc_page;
cprintf("kernel: Swap map pointer set to address 0x%p\n",new_kalloc_page);
}
}
lru_list_initialize();
cprintf("kernel: Done initializing swap info\n");
}
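// The swap map just allocated is an array of per-slot reference counts, one unsigned short
// per swap page: swap_map[i] == 0 means slot i is free, and each PTE referring to slot i
// adds one to the count (see swap_duplicate() and swap_entry_free() below), up to SWAP_MAP_MAX.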
void print_swap_map()
{
cprintf("Swap map(lb==%d, hb==%d):\n",swap_info[0].lowest_bit,swap_info[0].highest_bit);
for (int x = 0; x < SWAPFILE_PAGES; x++)
cprintf("%d ",swap_info[0].swap_map[x]);
cprintf("\n");
}
unsigned int swap_page_total_count()
{
return SWAPFILE_PAGES;
}
unsigned int swap_page_count()
{
return swap_info[0].pages;
}
inline unsigned int swap_refcount(unsigned long offset)
{
  if (offset >= SWAPFILE_PAGES)  // >=: valid offsets run 0..SWAPFILE_PAGES-1
panic("swap_refcount");
return swap_info[0].swap_map[offset];
}
int swap_duplicate(swp_entry_t entry)
{
  unsigned int offset = SWP_OFFSET(entry);
  int count;
  if (offset >= SWAPFILE_PAGES)
    panic("swap_duplicate");
  swap_list_lock();
  // Take the new count while still holding the lock
  count = ++swap_info[0].swap_map[offset];
  swap_list_unlock();
  return count;
}
int swap_entry_free(struct swap_info_struct *p, unsigned long offset)
{
int count = p->swap_map[offset];
if (count < SWAP_MAP_MAX) {
count--;
p->swap_map[offset] = count;
if (!count) {
if (offset < p->lowest_bit)
p->lowest_bit = offset;
if (offset > p->highest_bit)
p->highest_bit = offset;
p->pages++;
//cprintf("swap slot %d freed. # of swap pages: %d\n",offset,p->pages);
}
}
return count;
}
int swap_free(swp_entry_t entry)
{
int retval = 0;
struct swap_info_struct * p = &swap_info[0];
swap_list_lock();
retval = swap_entry_free(p, SWP_OFFSET(entry));
swap_list_unlock();
return retval;
}
int swap_free_nolocks(swp_entry_t entry)
// WARNING: Make sure to protect this call with locks!
{
return swap_entry_free(&swap_info[0], SWP_OFFSET(entry));
}
void free_swap_pages(struct proc *currproc)
// Frees all swap pages
{
//print_swap_map();
swap_list_lock();
// Standard page directory crawl
  for (int index1 = 0; index1 < NPDENTRIES; index1++)
  {
    pde_t *pde = &(currproc->pgdir[index1]);
    // Only user-space PDEs: indexes below 512 map addresses under KERNBASE
    if ((*pde & PTE_P) && index1 < 512)
    {
      pte_t *pgtab = (pte_t*)P2V(PTE_ADDR(*pde));
      for (int index2 = 0; index2 < NPTENTRIES; index2++)
      {
        if (!(pgtab[index2] & PTE_P) && (pgtab[index2] & PTE_U))
{
// Check if this page is swapped out
swp_entry_t this_entry = pte_to_swp_entry(pgtab[index2]);
uint offset = SWP_OFFSET(this_entry);
if (offset < SWAPFILE_PAGES && swap_info[0].swap_map[offset] != 0)
{
cprintf("process [%s] exiting. freeing slot entry %d. New refcount==%d\n",currproc->name,offset,swap_free_nolocks(this_entry));
}
}
}
}
}
swap_list_unlock();
}
int swap_out(pte_t *mapped_victim_pte, unsigned int offset)
// My own method (basically add_to_swap_cache() without the cache)
{
struct swap_info_struct *p = &swap_info[0];
  // Slot N lives at file offset (N+1)*PGSIZE; page 0 of the swap file is never used
  int file_offset = offset + 1, retval = -1;
uint old_offset;
char *kernel_addr = P2V(PTE_ADDR(*mapped_victim_pte));
if (p->swap_file == NULL)
// SWAPFILE pointer not set yet with ksetswapfileptr() system call
return -1;
//cprintf("Writing page located at 0x%p to file with file pointer at address 0x%p from bytes %d to %d\n",
// kernel_addr,p->swap_file,(file_offset * PGSIZE), (file_offset * PGSIZE) + PGSIZE);
old_offset = p->swap_file->off;
// Quick and dirty hack for now. Need a lock-protected state variable later
myproc()->pages_swapped_out++;
// Write contents to swapfile
p->swap_file->off = (unsigned int)(file_offset * PGSIZE);
retval = filewrite(p->swap_file,kernel_addr,PGSIZE);
p->swap_file->off = old_offset;
return retval;
}
int swap_in(void *page_addr, unsigned int offset)
// My own method. Swap a page into main memory from the specified slot
{
struct swap_info_struct *p = &swap_info[0];
int file_offset = offset + 1, retval = -1;
uint old_offset;
if (p->swap_file == NULL)
// SWAPFILE pointer not set yet with ksetswapfileptr() system call
return -1;
old_offset = p->swap_file->off;
// Quick and dirty hack for now. Need a lock-protected state variable later
myproc()->pages_swapped_out--;
// Read contents from swapfile
p->swap_file->off = (unsigned int)(file_offset * PGSIZE);
retval = fileread(p->swap_file,page_addr,PGSIZE);
p->swap_file->off = old_offset;
return retval;
}
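/*
 * Illustrative sketch (not called anywhere in this file): how the pieces above are meant to
 * combine on the swap-out path. victim_pte and make_swap_pte() are hypothetical stand-ins
 * for logic that lives in the fault-handling/eviction code, not in this file.
 *
 *   swp_entry_t entry = get_swap_page();         // reserve a slot; its refcount becomes 1
 *   if (entry.val == 0)
 *     return -1;                                 // out of swap space
 *   if (swap_out(victim_pte, SWP_OFFSET(entry)) < 0)
 *     swap_free(entry);                          // write failed; release the slot
 *   else
 *     *victim_pte = make_swap_pte(entry);        // non-present PTE recording the slot
 *
 * Swapping back in is the mirror image: kalloc() a fresh page, swap_in(page, offset),
 * rebuild the PTE as present, then swap_free() the slot.
 */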
/* From linux 2.4 source code w/modifications */
swp_entry_t get_swap_page()
{
  struct swap_info_struct * p;
  int offset;  // int, not unsigned: scan_swap_map() returns -1 on failure
  swp_entry_t entry;
  int type = 0;  // Only one swap area, so the type is always 0
  entry.val = 0; /* Out of swap space */
  swap_list_lock();
  if (swap_info[0].pages <= 0)
    goto out;
  p = &swap_info[type];
  if ((p->flags & SWP_WRITEOK) == SWP_WRITEOK) {
    swap_device_lock(p);
    offset = scan_swap_map(p);
    swap_device_unlock(p);
    if (offset >= 0)
      entry = SWP_ENTRY(type,offset);
    // The multi-area priority scan from Linux (the swap_list.next/head bookkeeping)
    // isn't needed with a single swap area
  }
out:
  swap_list_unlock();
  return entry;
}
inline void ksetswapfileptr(struct file *f)
{
cprintf("kernel: Swap file pointer set! Address of file pointer: 0x%p\n",f);
//cprintf("kernel: inode ptr: 0x%p\n",f->ip);
//cprintf("kernel: offset: 0x%p\n",f->off);
swap_info[0].swap_file = filedup(f);
}
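// Expected setup, as a sketch: some init-time code opens the swap file and hands its
// struct file to the kernel through a system call that lands here. The user-side names
// below are hypothetical; only ksetswapfileptr() itself is defined in this file:
//
//   int fd = open("SWAPFILE", O_RDWR);  // user side
//   setswapfile(fd);                    // syscall wrapper that reaches ksetswapfileptr(f)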
inline int scan_swap_map(struct swap_info_struct *si)
{
unsigned long offset;
/*
* We try to cluster swap pages by allocating them
* sequentially in swap. Once we've allocated
* SWAPFILE_CLUSTER pages this way, however, we resort to
* first-free allocation, starting a new cluster. This
* prevents us from scattering swap pages all over the entire
* swap partition, so that we reduce overall disk seek times
* between swap pages. -- sct */
if (si->cluster_nr) {
while (si->cluster_next <= si->highest_bit) {
offset = si->cluster_next++;
//cprintf("in first if. si->cluster_next==%d, si->highest_bit==%d\n",si->cluster_next,si->highest_bit);
if (si->swap_map[offset])
continue;
si->cluster_nr--;
//cprintf("in first if. offset==%d\n",offset);
goto got_page;
}
}
si->cluster_nr = SWAPFILE_CLUSTER;
/* try to find an empty (even not aligned) cluster. */
offset = si->lowest_bit;
check_next_cluster:
if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit)
{
int nr;
for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++)
if (si->swap_map[nr])
{
offset = nr+1;
goto check_next_cluster;
}
    /* We found a completely empty cluster, so start
     * using it.
     */
goto got_page;
}
  /* No luck, so now go fine-grained as usual. -Andrea */
for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) {
if (si->swap_map[offset])
continue;
si->lowest_bit = offset+1;
got_page:
if (offset == si->lowest_bit)
si->lowest_bit++;
if (offset == si->highest_bit)
si->highest_bit--;
if (si->lowest_bit > si->highest_bit) {
si->lowest_bit = si->max;
si->highest_bit = 0;
}
si->swap_map[offset] = 1;
si->pages--;
si->cluster_next = offset+1;
return offset;
}
  si->lowest_bit = si->max;
  si->highest_bit = 0;
  return -1;  // No free slot (0 is a valid slot offset, so signal failure with -1)
}
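// Worked example of the policy above, assuming an empty map and SWAPFILE_CLUSTER == 16:
// the first 16 calls return offsets 0..15 straight from cluster_next; the 17th resets the
// cluster and looks for a fully empty 16-slot window (offsets 16..31 if nothing was freed);
// only when no empty cluster exists does it fall back to first-free scanning.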
// LRU section
struct lru_list_entry {
pte_t addr;
struct lru_list_entry *next;
};
struct lru_list_struct {
struct lru_list_entry *active_list;
struct lru_list_entry *inactive_list;
struct spinlock lru_lock;
unsigned long nr_active_pages;
unsigned long nr_inactive_pages;
};
// Note that we can have a large number of entries in the LRU lists, far too many for a fixed
// array or the kernel stack. As a result, we need a subsystem to manage the doling out of LRU
// entries, which we dub the LRU bank: a linked list of pages, each holding a block of entries.
// To figure out how many entries fit per page, assume 12 bytes for the header
// (prev & next pointers and the used count)
#define LRU_HEADER_SIZE 12
#define LRU_ENTRIES_PER_PAGE ((PGSIZE - LRU_HEADER_SIZE) / sizeof(struct lru_list_entry))
struct lru_bank_page {
// Contains blocks of LRU entries to be given out when needed
struct lru_bank_page *prev;
struct lru_bank_page *next;
unsigned int used;
struct lru_list_entry slots[LRU_ENTRIES_PER_PAGE];
};
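// For example, with PGSIZE == 4096, the 12-byte header, and 8-byte entries (a 4-byte pte_t
// plus a 4-byte next pointer on 32-bit x86), each bank page holds (4096 - 12) / 8 == 510 entries.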
// Main container for LRU lists
struct lru_list_struct lru_list;
#define lru_list_lock() acquire(&lru_list.lru_lock)
#define lru_list_unlock() release(&lru_list.lru_lock)
struct lru_bank_page *lru_bank = NULL;
void lru_list_initialize()
{
cprintf("kernel: Initializing LRU list container.\n");
lru_bank = (struct lru_bank_page*)kalloc();
if (!lru_bank)
panic("Unable to allocate LRU bank!");
memset(lru_bank,0,sizeof(struct lru_bank_page));
cprintf("kernel: First page of LRU entry bank created at 0x%p\n", lru_bank);
cprintf("kernel: Initializing LRU active & inactive lists\n", lru_bank);
lru_list.active_list = NULL;
lru_list.inactive_list = NULL;
lru_list.nr_active_pages = 0;
lru_list.nr_inactive_pages = 0;
initlock(&lru_list.lru_lock,"lru_lock");
cprintf("kernel: LRU entries per page: %d\n", LRU_ENTRIES_PER_PAGE);
}
struct lru_list_entry *lru_bank_get_new()
// Returns a fresh LRU entry to be used
{
struct lru_list_entry *retval = NULL;
struct lru_bank_page *lb_curr = lru_bank;
struct lru_bank_page *lb_last = lb_curr;
while (lb_curr)
{
uint exit = 0;
if (lb_curr->used < LRU_ENTRIES_PER_PAGE)
{
// Take an entry from the current page
for (int x = 0; x < LRU_ENTRIES_PER_PAGE; x++)
{
if (lb_curr->slots[x].addr == 0)
// Found free entry
{
retval = &lb_curr->slots[x];
lb_curr->used++;
exit = 1;
break;
}
}
}
if (exit > 0)
break;
// No free entries found. Try next page
lb_last = lb_curr;
lb_curr = lb_curr->next;
}
if (retval == NULL)
// All LRU entries in all currently allocated bank pages are in use. Create a new bank page
{
if (lb_last == NULL)
panic("lru_bank_get_new(): lb_last != NULL assertion failed");
    // Create new bank page; zero it before linking it in, so the memset
    // can't wipe the prev pointer
    lb_curr = (struct lru_bank_page*)kalloc();
    if (lb_curr == NULL)
      panic("lru_bank_get_new(): out of memory");
    memset(lb_curr,0,PGSIZE);
    lb_curr->prev = lb_last;
    lb_last->next = lb_curr;
    // Assign new LRU item
    retval = &lb_curr->slots[0];
    lb_curr->used++;
}
//cprintf("kernel: Got new LRU entry at address 0x%p\n",retval);
retval->addr = 0;
retval->next = NULL;
return retval;
}
struct lru_bank_page *lru_bank_find_page(struct lru_list_entry *entry)
// Finds the LRU bank page of the associated entry. Should never return NULL
{
struct lru_bank_page *retval = NULL;
struct lru_bank_page *currpg = lru_bank;
while (currpg && retval == NULL)
{
if ((uint)entry >= (uint)currpg && (uint)entry < (uint)currpg + PGSIZE)
retval = currpg;
currpg = currpg->next;
}
return retval;
}
void lru_bank_release(struct lru_list_entry *target)
// Removes target entry and releases it back into the LRU bank. Does not change the next field
{
//cprintf("kernel: lru_bank_page() finding LRU entry with target==0x%p,target->addr==0x%p\n",target,target->addr);
struct lru_bank_page *currpg = lru_bank_find_page(target);
if (currpg == NULL)
panic("lru_bank_release()");
// Blank out this LRU entry and free it for use
target->addr = 0;
currpg->used--;
}
void lru_cache_add(pte_t addr, int pageHot)
// Add a cold page (pageHot == 0) to the front of the inactive_list; it will be moved to the
// active_list by a later call to mark_page_accessed(). If the page is already known to be hot
// (pageHot > 0), such as when it was just faulted in, it goes straight onto the active_list.
{
cprintf("kernel: Adding %s pte at kernel address 0x%p to LRU cache\n",(pageHot == 0 ? "cold" : "hot"),addr);
lru_list_lock();
struct lru_list_entry *new_entry = lru_bank_get_new();
//cprintf("kernel: lru_cache_add(): new_entry==0x%p\n",new_entry);
new_entry->addr = addr;
if (pageHot <= 0)
{
// Add cold page to front of inactive list
new_entry->next = lru_list.inactive_list;
lru_list.inactive_list = new_entry;
lru_list.nr_inactive_pages++;
}
else
{
// Since we know the page is hot, put it in the front of the active list right now
new_entry->next = lru_list.active_list;
lru_list.active_list = new_entry;
lru_list.nr_active_pages++;
// *pte |= PTE_A; <----- Add accessed bit (reference purposes)
}
// Clear accessed bit (although this is pointless if the page is hot and this is called from trap.c as a page fault)
pte_t *pte = (pte_t*)addr;
*pte &= ~PTE_A;
lru_list_unlock();
}
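// Typical call sites, as a sketch (the real callers live in the paging code, e.g. the
// page-fault path in trap.c; "pte" here is a hypothetical pte_t* for the mapped page):
//
//   lru_cache_add((pte_t)pte, 1);  // page just faulted in: straight onto the active list
//   lru_cache_add((pte_t)pte, 0);  // freshly mapped but not yet touched: inactive list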
void lru_cache_del(pte_t addr, uint rangeSize)
// Removes pages from the LRU lists (the Linux equivalents are del_page_from_active_list()
// and del_page_from_inactive_list(); here we just search both lists directly).
// rangeSize removes every entry in [addr, addr + rangeSize); set rangeSize to 0 to remove exactly addr
{
  lru_list_lock();
  struct lru_list_entry *curr = NULL;
  struct lru_list_entry *prev = NULL;
  struct lru_list_entry *next = NULL;
  // Search active list first
  curr = lru_list.active_list;
  while (curr)
  {
    next = curr->next;
    if (rangeSize == 0 ? curr->addr == addr
                       : (curr->addr >= addr && curr->addr < (uint)addr + rangeSize))
    {
      // Remove the entry
      cprintf("kernel: Removing LRU entry for PTE at 0x%p from active list\n",curr->addr);
      lru_bank_release(curr);
      lru_list.nr_active_pages--;
      if (lru_list.active_list == curr)
        lru_list.active_list = next;
      else
        prev->next = next;
      if (rangeSize == 0)
      {
        lru_list_unlock();
        return;
      }
    }
    else
      // Only advance prev past entries we keep; a released entry must not
      // become the splice point for a later removal
      prev = curr;
    curr = next;
  }
  // Now search inactive list
  curr = lru_list.inactive_list;
  prev = NULL;
  while (curr)
  {
    next = curr->next;
    if (rangeSize == 0 ? curr->addr == addr
                       : (curr->addr >= addr && curr->addr < (uint)addr + rangeSize))
    {
      // Remove the entry
      cprintf("kernel: Removing LRU entry for PTE at 0x%p from inactive list\n",curr->addr);
      lru_bank_release(curr);
      lru_list.nr_inactive_pages--;
      if (lru_list.inactive_list == curr)
        lru_list.inactive_list = next;
      else
        prev->next = next;
      if (rangeSize == 0)
      {
        lru_list_unlock();
        return;
      }
    }
    else
      prev = curr;
    curr = next;
  }
  lru_list_unlock();
}
void activate_page(pte_t addr)
// Removes a page from the inactive_list and places it on active_list.
// It is very rarely called directly as the caller has to know the page is on inactive_list. mark_page_accessed() should be used instead
{
pte_t *pte = (pte_t*)addr;
// Clear accessed bit
*pte &= ~PTE_A;
lru_cache_del(addr,0);
lru_cache_add(addr,1);
}
void mark_page_accessed(pte_t addr)
// Mark that the page has been accessed. Implementation more complex in linux but here we just call activate_page()
{
activate_page(addr);
}
void print_lru()
// Method to list all pages in the LRU cache. Used for demonstration/debugging purposes
{
struct lru_list_entry *entry = lru_list.active_list;
unsigned int index = 0;
cprintf("*** LRU table ***\n");
cprintf("*** Active list ***\n");
while (entry)
{
pte_t *pte = (pte_t*)entry->addr;
cprintf("Entry #%d: PTE addr==0x%p, accessed bit: %d\n",index,entry->addr,(*pte & PTE_A));
index++;
entry = entry->next;
}
cprintf("*** Inactive list ***\n");
entry = lru_list.inactive_list;
index = 0;
while (entry)
{
pte_t *pte = (pte_t*)entry->addr;
cprintf("Entry #%d: PTE addr==0x%p, accessed bit: %d\n",index,entry->addr,(*pte & PTE_A));
index++;
entry = entry->next;
}
cprintf("*** End of LRU table ***\n");
}
void refill_inactive()
// Simplified version of the refill_inactive() routine in Linux; here we do everything in this one method.
// Our goal is an active list roughly 2/3 the size of the inactive list, i.e. 2/5 of the
// total, with the inactive list holding the remaining 3/5
{
  // nr_pages would be the # of pages to swap out per pass; currently it only
  // feeds the debug print below
  unsigned long nr_pages = 1;
unsigned long ratio = 0;
unsigned long index = 0;
unsigned long total_lru_pages = lru_list.nr_active_pages + lru_list.nr_inactive_pages;
unsigned long active_target_count;
struct lru_list_entry *entry = lru_list.active_list;
  // Get # of pages to move.
  // Per the docs, the active list needs to be 2/3 the size of the inactive list;
  // hence the active list is 2/5 of the total and the inactive list 3/5 of the total
active_target_count = 2 * total_lru_pages;
active_target_count /= 5;
  // nr_active_pages and ratio are unsigned, so guard against underflow rather than
  // computing the difference and testing ratio < 0 (always false for an unsigned type)
  if (lru_list.nr_active_pages > active_target_count)
    ratio = lru_list.nr_active_pages - active_target_count;
  else
    ratio = 0;
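  // Example: with 50 LRU pages in total and 30 of them active, the target is
  // 2*50/5 == 20, so ratio == 10 pages become candidates to move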
cprintf("kernel: refill_inactive() found: atc==%d, nr_pages==%d, active==%d, inactive==%d, ratio==%d \n",active_target_count, nr_pages,lru_list.nr_active_pages,lru_list.nr_inactive_pages,ratio);
cprintf("kernel: refill_inactive() moving %d pages from active to inactive list\n",ratio);
// Move the pages
while (entry != NULL)
{
// Check for pages which have been accessed
pte_t addr = entry->addr;
pte_t *pte = (pte_t*)entry->addr;
struct lru_list_entry *nextentry = entry->next;
uint is_active = (*pte & PTE_A);
// We're either going to move the page to the front of the active list or to the inactive list
// In either case, we'll delete the one we have now
if (!(is_active) && ratio > 0)
// Accessed bit is not set, so page isn't hot anymore. Move to inactive list
{
cprintf("kernel: moving active LRU entry to inactive list\n");
// Clear accessed bit
*pte &= ~PTE_A;
lru_cache_del(addr,0);
lru_cache_add(addr,0);
// Add accessed bit
//*pte |= PTE_A;
ratio--;
}
else if (is_active)
// Page is still hot, clear accessed bit & move to front of active list
{
cprintf("kernel: moving hot LRU entry to front of active list\n");
mark_page_accessed(entry->addr);
}
entry = nextentry;
index++;
}
}
void refill_active()
// Place any inactive pages accessed back into the active list
{
struct lru_list_entry *entry = lru_list.inactive_list;
while (entry != NULL)
{
// Check for pages which have been accessed
pte_t *pte = (pte_t*)entry->addr;
struct lru_list_entry *nextentry = entry->next;
    // If this page was accessed, move it to the front of the active list
    // (mark_page_accessed() deletes the current entry and re-adds it there)
if (*pte & PTE_A)
// Accessed bit is set, so page is now hot. Move to active list
{
cprintf("kernel: moving hot LRU entry to front of active list\n");
mark_page_accessed(entry->addr);
}
entry = nextentry;
}
}
void lru_rotate_lists()
// Rotates the active and inactive LRU lists, loosely following the simplified two-list
// (2Q-style) LRU scheme used by Linux 2.4
{
refill_inactive();
refill_active();
}
void lru_remove_proc_pages(struct proc *currproc)
{
// Standard page directory crawl
for (int index1 = 0; index1 < NPDENTRIES; index1++)
{
pde_t *pde = &(currproc->pgdir[index1]);
if (*pde & PTE_P && index1 < 512)
{
      pte_t *pgtab = (pte_t*)P2V(PTE_ADDR(*pde));
cprintf("kernel: Removing LRU entries between 0x%p and 0x%p.\n",pgtab,(uint)pgtab+PGSIZE);
lru_cache_del((uint)pgtab,PGSIZE);
}
}
}
unsigned int *get_victim_page()
// Returns the address of the page table entry for the next victim page
{
struct lru_list_entry *curr = NULL;
struct lru_list_entry *victim_entry = NULL;
pte_t *pte = NULL;
// Try this # of times to rotate the list and find a victim.
int attempts = 2;
for (int index = 0; index < attempts; index++)
{
// Give us some fresh inactives
lru_rotate_lists();
curr = lru_list.inactive_list;
while (curr)
// This loop gets the last inactive entry that hasn't been accessed
{
pte = (pte_t*)curr->addr;
if (!(*pte & PTE_A) && (*pte & PTE_P))
// Page not accessed & is still present in memory. Make this the current victim
victim_entry = curr;
curr = curr->next;
}
if (victim_entry != NULL)
// No need for further attempts
break;
}
if (victim_entry == NULL)
// Unlikely (especially after a couple list rotations) and there are more sophisticated ways to deal with this. May implement if this happens often
panic("No LRU inactive pages present in physical memory");
  else
    return (unsigned int*)victim_entry->addr;
return 0;
}
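/*
 * Closing sketch (hypothetical caller, not part of this file): the eviction path is
 * expected to tie the LRU side to the swap side roughly like this:
 *
 *   pte_t *victim = (pte_t*)get_victim_page();   // least-recently-used present page
 *   swp_entry_t entry = get_swap_page();         // reserve a swap slot
 *   if (entry.val != 0 && swap_out(victim, SWP_OFFSET(entry)) > 0) {
 *     lru_cache_del((pte_t)victim, 0);           // drop its LRU entry
 *     // then: clear PTE_P in *victim, record the swap entry in the PTE,
 *     // free the physical frame, and flush the TLB
 *   }
 */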