/*
* Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
* Authors: David Chinner and Glauber Costa
*
* Generic LRU infrastructure
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
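/*
 * Illustrative usage sketch, not part of this file: a cache that wants a
 * per-node (and optionally memcg-aware) LRU pairs a list_lru with a
 * shrinker.  The names my_lru, my_shrinker, my_object and lru_entry below
 * are hypothetical; with this file's __list_lru_init() signature,
 * list_lru_init_memcg() also takes the shrinker.
 *
 *	static struct list_lru my_lru;
 *
 *	err = list_lru_init_memcg(&my_lru, &my_shrinker);
 *	...
 *	list_lru_add(&my_lru, &my_object->lru_entry);	// object became idle
 *	list_lru_del(&my_lru, &my_object->lru_entry);	// object is in use again
 *	...
 *	list_lru_destroy(&my_lru);
 */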
#ifdef CONFIG_MEMCG_KMEM
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
static void list_lru_register(struct list_lru *lru)
{
mutex_lock(&list_lrus_mutex);
list_add(&lru->list, &list_lrus);
mutex_unlock(&list_lrus_mutex);
}
static void list_lru_unregister(struct list_lru *lru)
{
mutex_lock(&list_lrus_mutex);
list_del(&lru->list);
mutex_unlock(&list_lrus_mutex);
}
static int lru_shrinker_id(struct list_lru *lru)
{
return lru->shrinker_id;
}
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
/*
* This needs node 0 to be always present, even
* in the systems supporting sparse numa ids.
*/
return !!lru->node[0].memcg_lrus;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either lock or RCU protects the array of per cgroup lists
	 * from relocation (see memcg_update_list_lru_node).
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}
static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
struct page *page;
if (!memcg_kmem_enabled())
return NULL;
page = virt_to_head_page(ptr);
return page->mem_cgroup;
}
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	struct list_lru_one *l = &nlru->lru;
	struct mem_cgroup *memcg = NULL;

	if (!nlru->memcg_lrus)
		goto out;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		goto out;

	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
out:
	if (memcg_ptr)
		*memcg_ptr = memcg;
	return l;
}
#else /* CONFIG_MEMCG_KMEM */
static void list_lru_register(struct list_lru *lru)
{
}
static void list_lru_unregister(struct list_lru *lru)
{
}
static int lru_shrinker_id(struct list_lru *lru)
{
return -1;
}
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
return false;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
return &nlru->lru;
}
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr,
		   struct mem_cgroup **memcg_ptr)
{
	if (memcg_ptr)
		*memcg_ptr = NULL;
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG_KMEM */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct mem_cgroup *memcg;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, &memcg);
		list_add_tail(item, &l->list);
		/* Set shrinker bit if the first element was added */
		if (!l->nr_items++)
			memcg_set_shrinker_bit(memcg, nid,
					       lru_shrinker_id(lru));
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item, NULL);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
list_del_init(item);
list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head)
{
list_move(item, head);
list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
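/*
 * Illustrative isolate callback sketch, not part of this file: the walk
 * above dispatches on the lru_status value the callback returns.  A common
 * pattern is to move reclaimable objects onto a private dispose list passed
 * via cb_arg and free them after the walk; my_object, my_obj_busy() and the
 * lru_entry member are hypothetical.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *lru,
 *					  spinlock_t *lru_lock, void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *		struct my_object *obj =
 *			container_of(item, struct my_object, lru_entry);
 *
 *		if (my_obj_busy(obj))
 *			return LRU_SKIP;	// leave it on the LRU for now
 *
 *		list_lru_isolate_move(lru, item, dispose);
 *		return LRU_REMOVED;		// walker drops nlru->nr_items
 *	}
 */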
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
{
return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk)
{
long isolated = 0;
int memcg_idx;
isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
nr_to_walk);
if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
for_each_memcg_cache_index(memcg_idx) {
isolated += __list_lru_walk_one(lru, nid, memcg_idx,
isolate, cb_arg, nr_to_walk);
if (*nr_to_walk <= 0)
break;
}
}
return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
static void init_one_lru(struct list_lru_one *l)
{
INIT_LIST_HEAD(&l->list);
l->nr_items = 0;
}
#ifdef CONFIG_MEMCG_KMEM
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
int begin, int end)
{
int i;
for (i = begin; i < end; i++)
kfree(memcg_lrus->lru[i]);
}
static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
int begin, int end)
{
int i;
for (i = begin; i < end; i++) {
struct list_lru_one *l;
l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
if (!l)
goto fail;
init_one_lru(l);
memcg_lrus->lru[i] = l;
}
return 0;
fail:
	/* free only the entries that were successfully allocated */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
struct list_lru_memcg *memcg_lrus;
/*
* This is called when shrinker has already been unregistered,
* and nobody can use it. So, there is no need to use kvfree_rcu().
*/
memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
kvfree(memcg_lrus);
}
static void kvfree_rcu(struct rcu_head *head)
{
struct list_lru_memcg *mlru;
mlru = container_of(head, struct list_lru_memcg, rcu);
kvfree(mlru);
}
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock avoid taking
	 * rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}
static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
int old_size, int new_size)
{
struct list_lru_memcg *memcg_lrus;
memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
lockdep_is_held(&list_lrus_mutex));
/* do not bother shrinking the array back to the old size, because we
* cannot handle allocation failures here */
__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
int i;
if (!memcg_aware)
return 0;
for_each_node(i) {
if (memcg_init_list_lru_node(&lru->node[i]))
goto fail;
}
return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
int i;
if (!list_lru_memcg_aware(lru))
return;
for_each_node(i)
memcg_destroy_list_lru_node(&lru->node[i]);
}
static int memcg_update_list_lru(struct list_lru *lru,
int old_size, int new_size)
{
int i;
if (!list_lru_memcg_aware(lru))
return 0;
for_each_node(i) {
if (memcg_update_list_lru_node(&lru->node[i],
old_size, new_size))
goto fail;
}
return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}
static void memcg_cancel_update_list_lru(struct list_lru *lru,
int old_size, int new_size)
{
int i;
if (!list_lru_memcg_aware(lru))
return;
for_each_node(i)
memcg_cancel_update_list_lru_node(&lru->node[i],
old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
int ret = 0;
struct list_lru *lru;
int old_size = memcg_nr_cache_ids;
mutex_lock(&list_lrus_mutex);
list_for_each_entry(lru, &list_lrus, list) {
ret = memcg_update_list_lru(lru, old_size, new_size);
if (ret)
goto fail;
}
out:
mutex_unlock(&list_lrus_mutex);
return ret;
fail:
list_for_each_entry_continue_reverse(lru, &list_lrus, list)
memcg_cancel_update_list_lru(lru, old_size, new_size);
goto out;
}
static void memcg_drain_list_lru_node(struct list_lru *lru, int nid,
				      int src_idx, struct mem_cgroup *dst_memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	int dst_idx = dst_memcg->kmemcg_id;
	struct list_lru_one *src, *dst;
	bool set;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	set = (!dst->nr_items && src->nr_items);
	dst->nr_items += src->nr_items;
	if (set)
		memcg_set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}
static void memcg_drain_list_lru(struct list_lru *lru,
int src_idx, struct mem_cgroup *dst_memcg)
{
int i;
if (!list_lru_memcg_aware(lru))
return;
	for_each_node(i)
		memcg_drain_list_lru_node(lru, i, src_idx, dst_memcg);
}
void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg)
{
struct list_lru *lru;
mutex_lock(&list_lrus_mutex);
list_for_each_entry(lru, &list_lrus, list)
memcg_drain_list_lru(lru, src_idx, dst_memcg);
mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
return 0;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG_KMEM */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

#ifdef CONFIG_MEMCG_KMEM
	if (shrinker)
		lru->shrinker_id = shrinker->id;
	else
		lru->shrinker_id = -1;
#endif
	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();
	list_lru_unregister(lru);
	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

#ifdef CONFIG_MEMCG_KMEM
	lru->shrinker_id = -1;
#endif
	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);