 static DEFINE_MUTEX(damon_lock);
 static int nr_running_ctxs;
 
+/*
+ * Construct a damon_region struct
+ *
+ * Returns the pointer to the new struct if success, or NULL otherwise
+ */
+struct damon_region *damon_new_region(unsigned long start, unsigned long end)
+{
+	struct damon_region *region;
+
+	region = kmalloc(sizeof(*region), GFP_KERNEL);
+	if (!region)
+		return NULL;
+
+	region->ar.start = start;
+	region->ar.end = end;
+	region->nr_accesses = 0;
+	INIT_LIST_HEAD(&region->list);
+
+	return region;
+}
+
+/*
+ * Add a region between two other regions
+ */
+inline void damon_insert_region(struct damon_region *r,
+		struct damon_region *prev, struct damon_region *next)
+{
+	__list_add(&r->list, &prev->list, &next->list);
+}
+
+void damon_add_region(struct damon_region *r, struct damon_target *t)
+{
+	list_add_tail(&r->list, &t->regions_list);
+}
+
+static void damon_del_region(struct damon_region *r)
+{
+	list_del(&r->list);
+}
+
+static void damon_free_region(struct damon_region *r)
+{
+	kfree(r);
+}
+
+void damon_destroy_region(struct damon_region *r)
+{
+	damon_del_region(r);
+	damon_free_region(r);
+}
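
For illustration, a minimal sketch (not part of the patch) of how the region
helpers above compose; the target is assumed to come from damon_new_target(),
added below:

	/*
	 * Illustrative sketch only: allocate a region covering
	 * [0x1000, 0x2000), link it to a target, then tear it down.
	 */
	static int example_region_lifecycle(struct damon_target *t)
	{
		struct damon_region *r;

		r = damon_new_region(0x1000, 0x2000);
		if (!r)
			return -ENOMEM;
		damon_add_region(r, t);

		/* unlinks from t->regions_list, then kfree()s */
		damon_destroy_region(r);
		return 0;
	}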
+
+/*
+ * Construct a damon_target struct
+ *
+ * Returns the pointer to the new struct if success, or NULL otherwise
+ */
+struct damon_target *damon_new_target(unsigned long id)
+{
+	struct damon_target *t;
+
+	t = kmalloc(sizeof(*t), GFP_KERNEL);
+	if (!t)
+		return NULL;
+
+	t->id = id;
+	INIT_LIST_HEAD(&t->regions_list);
+
+	return t;
+}
+
+void damon_add_target(struct damon_ctx *ctx, struct damon_target *t)
+{
+	list_add_tail(&t->list, &ctx->region_targets);
+}
+
+static void damon_del_target(struct damon_target *t)
+{
+	list_del(&t->list);
+}
+
+void damon_free_target(struct damon_target *t)
+{
+	struct damon_region *r, *next;
+
+	damon_for_each_region_safe(r, next, t)
+		damon_free_region(r);
+	kfree(t);
+}
+
+void damon_destroy_target(struct damon_target *t)
+{
+	damon_del_target(t);
+	damon_free_target(t);
+}
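
A hedged usage sketch (not part of the patch) tying the target and region
helpers together; the context is assumed to come from damon_new_ctx():

	/*
	 * Illustrative sketch only: build a target, give it one initial
	 * region, and register it with a context.  The regions are freed
	 * together with the target via damon_free_target(), so the error
	 * path needs no separate region cleanup.
	 */
	static int example_target_setup(struct damon_ctx *ctx, unsigned long id)
	{
		struct damon_target *t;
		struct damon_region *r;

		t = damon_new_target(id);
		if (!t)
			return -ENOMEM;

		r = damon_new_region(0x1000, 0x2000);
		if (!r) {
			damon_free_target(t);
			return -ENOMEM;
		}
		damon_add_region(r, t);
		damon_add_target(ctx, t);
		return 0;
	}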
+
 struct damon_ctx *damon_new_ctx(void)
 {
 	struct damon_ctx *ctx;
@@ -32,15 +127,27 @@ struct damon_ctx *damon_new_ctx(void)
 
 	mutex_init(&ctx->kdamond_lock);
 
-	ctx->target = NULL;
+	INIT_LIST_HEAD(&ctx->region_targets);
 
 	return ctx;
 }
 
-void damon_destroy_ctx(struct damon_ctx *ctx)
+static void damon_destroy_targets(struct damon_ctx *ctx)
 {
-	if (ctx->primitive.cleanup)
+	struct damon_target *t, *next_t;
+
+	if (ctx->primitive.cleanup) {
 		ctx->primitive.cleanup(ctx);
+		return;
+	}
+
+	damon_for_each_target_safe(t, next_t, ctx)
+		damon_destroy_target(t);
+}
+
+void damon_destroy_ctx(struct damon_ctx *ctx)
+{
+	damon_destroy_targets(ctx);
 	kfree(ctx);
 }
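
The early return in damon_destroy_targets() means a primitive that installs
->cleanup takes over target teardown entirely.  A hypothetical cleanup
callback, sketched only to illustrate that contract:

	/*
	 * Hypothetical primitive ->cleanup: since the core skips its own
	 * target walk when this hook is set, the hook must destroy the
	 * targets itself.
	 */
	static void example_cleanup(struct damon_ctx *ctx)
	{
		struct damon_target *t, *next;

		damon_for_each_target_safe(t, next, ctx) {
			/* release any primitive-private references to t here */
			damon_destroy_target(t);
		}
	}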
 
@@ -217,6 +324,21 @@ static bool kdamond_aggregate_interval_passed(struct damon_ctx *ctx)
 			ctx->aggr_interval);
 }
 
+/*
+ * Reset the aggregated monitoring results ('nr_accesses' of each region).
+ */
+static void kdamond_reset_aggregated(struct damon_ctx *c)
+{
+	struct damon_target *t;
+
+	damon_for_each_target(t, c) {
+		struct damon_region *r;
+
+		damon_for_each_region(r, t)
+			r->nr_accesses = 0;
+	}
+}
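
This reset runs once per aggregation interval (see kdamond_fn() below), right
after the ->after_aggregation callback, so that callback is the last point at
which the aggregated counts are still visible.  A hypothetical callback,
sketched only to illustrate the timing:

	/*
	 * Hypothetical ->after_aggregation callback: consume nr_accesses
	 * before kdamond_reset_aggregated() zeroes it for the next
	 * interval.  Returning nonzero would stop kdamond.
	 */
	static int example_after_aggregation(struct damon_ctx *c)
	{
		struct damon_target *t;

		damon_for_each_target(t, c) {
			struct damon_region *r;

			damon_for_each_region(r, t)
				pr_debug("region [%lu, %lu): %u accesses\n",
					 r->ar.start, r->ar.end,
					 r->nr_accesses);
		}
		return 0;
	}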
+
 /*
  * Check whether it is time to check and apply the target monitoring regions
  *
@@ -238,6 +360,7 @@ static bool kdamond_need_update_primitive(struct damon_ctx *ctx)
  */
 static bool kdamond_need_stop(struct damon_ctx *ctx)
 {
+	struct damon_target *t;
 	bool stop;
 
 	mutex_lock(&ctx->kdamond_lock);
@@ -249,7 +372,12 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
 	if (!ctx->primitive.target_valid)
 		return false;
 
-	return !ctx->primitive.target_valid(ctx->target);
+	damon_for_each_target(t, ctx) {
+		if (ctx->primitive.target_valid(t))
+			return false;
+	}
+
+	return true;
 }
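
With multiple targets, kdamond now stops only when every target has become
invalid; a single remaining valid target keeps monitoring alive.  A
hypothetical ->target_valid implementation, for illustration only, treating
the target id as a pid:

	/*
	 * Hypothetical validity check: the target is valid while the
	 * process identified by t->id still exists.
	 */
	static bool example_target_valid(struct damon_target *t)
	{
		struct task_struct *task;

		rcu_read_lock();
		task = pid_task(find_vpid(t->id), PIDTYPE_PID);
		rcu_read_unlock();

		return task != NULL;
	}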
 
 static void set_kdamond_stop(struct damon_ctx *ctx)
@@ -265,6 +393,8 @@ static void set_kdamond_stop(struct damon_ctx *ctx)
 static int kdamond_fn(void *data)
 {
 	struct damon_ctx *ctx = (struct damon_ctx *)data;
+	struct damon_target *t;
+	struct damon_region *r, *next;
 
 	mutex_lock(&ctx->kdamond_lock);
 	pr_info("kdamond (%d) starts\n", ctx->kdamond->pid);
@@ -291,6 +421,7 @@ static int kdamond_fn(void *data)
 			if (ctx->callback.after_aggregation &&
 					ctx->callback.after_aggregation(ctx))
 				set_kdamond_stop(ctx);
+			kdamond_reset_aggregated(ctx);
 			if (ctx->primitive.reset_aggregated)
 				ctx->primitive.reset_aggregated(ctx);
 		}
@@ -300,6 +431,10 @@ static int kdamond_fn(void *data)
 			ctx->primitive.update(ctx);
 		}
 	}
+	damon_for_each_target(t, ctx) {
+		damon_for_each_region_safe(r, next, t)
+			damon_destroy_region(r);
+	}
 
 	if (ctx->callback.before_terminate &&
 			ctx->callback.before_terminate(ctx))