// SPDX-License-Identifier: GPL-2.0
/*
* linux/ipc/shm.c
* Copyright (C) 1992, 1993 Krishna Balasubramanian
* Many improvements/fixes by Bruno Haible.
* Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
* Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
*
* /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
* BIGMEM support, Andrea Arcangeli <andrea@suse.de>
* SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
* HIGHMEM support, Ingo Molnar <mingo@redhat.com>
* Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
* Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
* Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
*
* support for audit of ipc object properties and permission changes
* Dustin Kirkland <dustin.kirkland@us.ibm.com>
*
* namespaces support
* OpenVZ, SWsoft Inc.
* Pavel Emelianov <xemul@openvz.org>
*
* Better ipc lock (kern_ipc_perm.lock) handling
* Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
*/
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shm_file_data {
int id;
struct ipc_namespace *ns;
struct file *file;
const struct vm_operations_struct *vm_ops;
};
#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
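/*
* Illustrative note: shm_file_data() expands to an lvalue, so the same
* macro both reads and assigns the per-file context (see e.g.
* "shm_file_data(file) = NULL" in shm_release() below).
*/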
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;
#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS])
#define shm_unlock(shp) \
ipc_unlock(&(shp)->shm_perm)
static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
int shm_init_ns(struct ipc_namespace *ns)
{
ns->shm_ctlmax = SHMMAX;
ns->shm_ctlall = SHMALL;
ns->shm_ctlmni = SHMMNI;
ns->shm_rmid_forced = 0;
ns->shm_tot = 0;
return ipc_init_ids(&shm_ids(ns));
}

/*
* Called with shm_ids.rwsem (writer) and the shp structure locked.
* Only shm_ids.rwsem remains locked on exit.
*/
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
struct shmid_kernel *shp;

shp = container_of(ipcp, struct shmid_kernel, shm_perm);
WARN_ON(ns != shp->ns);

if (shp->shm_nattch) {
shp->shm_perm.mode |= SHM_DEST;
/* Do not find it any more */
ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
shm_unlock(shp);
} else
shm_destroy(ns, shp);
}
#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
const int err = shm_init_ns(&init_ipc_ns);
WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
return err;
}
pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#else
" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n",
#endif
IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
/*
* shm_lock_(check_) routines are called in the paths where the rwsem
* is not necessarily held.
*/
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

/*
* Callers of shm_lock() must validate the status of the returned ipc
* object pointer (as returned by ipc_lock()), and error out as
* appropriate.
*/
if (IS_ERR(ipcp))
return (void *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
rcu_read_lock();
ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
rcu);
struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
shm_perm);
security_shm_free(shp);
kvfree(shp);
}

/*
* It has to be called with shp locked.
* It must be called before ipc_rmid()
*/
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
struct task_struct *creator;
/* ensure that shm_creator does not disappear */
rcu_read_lock();
/*
* A concurrent exit_shm may do a list_del_init() as well.
* Just do nothing if exit_shm already did the work
*/
if (!list_empty(&shp->shm_clist)) {
/*
* shp->shm_creator is guaranteed to be valid *only*
* if shp->shm_clist is not empty.
*/
creator = shp->shm_creator;
task_lock(creator);
/*
* list_del_init() is a nop if the entry was already removed
* from the list.
*/
list_del_init(&shp->shm_clist);
task_unlock(creator);
}
rcu_read_unlock();
}
static inline void shm_rmid(struct shmid_kernel *s)
{
shm_clist_rm(s);
ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}

static int __shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;

shp = shm_lock(sfd->ns, sfd->id);
if (IS_ERR(shp))
return PTR_ERR(shp);
if (shp->shm_file != sfd->file) {
/* ID was reused */
shm_unlock(shp);
return -EINVAL;
}
shp->shm_atim = ktime_get_real_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
return 0;
}
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
int err = __shm_open(vma);
/*
* We raced in the idr lookup or with shm_destroy().
* Either way, the ID is busted.
*/
WARN_ON_ONCE(err);
}
/*
* shm_destroy - free the struct shmid_kernel
*
* It has to be called with shp and shm_ids.rwsem (writer) locked,
* but returns with shp unlocked and freed.
*/
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
struct file *shm_file;

shm_file = shp->shm_file;
shp->shm_file = NULL;
ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
shm_rmid(shp);
shm_unlock(shp);
if (!is_file_hugepages(shm_file))
shmem_lock(shm_file, 0, shp->mlock_user);
else if (shp->mlock_user)
user_shm_unlock(i_size_read(file_inode(shm_file)),
shp->mlock_user);
fput(shm_file);
ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
* shm_may_destroy - identifies whether shm segment should be destroyed now
*
* Returns true if and only if there are no active users of the segment and
* one of the following is true:
*
* 1) shmctl(id, IPC_RMID, NULL) was called for this shp
*
* 2) sysctl kernel.shm_rmid_forced is set to 1.
*/
static bool shm_may_destroy(struct shmid_kernel *shp)
{
return (shp->shm_nattch == 0) &&
(shp->ns->shm_rmid_forced ||
(shp->shm_perm.mode & SHM_DEST));
}
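/*
* Illustrative userspace sequence (not from this file) exercising the
* two cases above:
*
*	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
*	void *p = shmat(id, NULL, 0);
*	shmctl(id, IPC_RMID, NULL);	// marks SHM_DEST, case 1)
*	shmdt(p);			// nattch drops to 0, segment freed
*
* With sysctl kernel.shm_rmid_forced=1 (case 2), the final shmdt()
* destroys the segment even without the IPC_RMID.
*/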
/*
* remove the attach descriptor vma.
* free memory for segment if it is marked destroyed.
* The descriptor has already been removed from the current->mm->mmap list
* and will later be kfree()d.
*/
static void shm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
struct ipc_namespace *ns = sfd->ns;

down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);

/*
* We raced in the idr lookup or with shm_destroy().
* Either way, the ID is busted.
*/
if (WARN_ON_ONCE(IS_ERR(shp)))
goto done; /* no-op */
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = ktime_get_real_seconds();
shp->shm_nattch--;
if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
done:
up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
struct ipc_namespace *ns = data;
struct kern_ipc_perm *ipcp = p;
struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);
/*
* We want to destroy segments without users and with already
* exit'ed originating process.
*
* As shp->* are changed under rwsem, it's safe to skip shp locking.
*/
if (!list_empty(&shp->shm_clist))
return 0;

if (shm_may_destroy(shp)) {
shm_lock_by_ptr(shp);
shm_destroy(ns, shp);
}
return 0;
}
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
down_write(&shm_ids(ns).rwsem);
if (shm_ids(ns).in_use)
idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
for (;;) {
struct shmid_kernel *shp;
struct ipc_namespace *ns;
task_lock(task);
if (list_empty(&task->sysvshm.shm_clist)) {
task_unlock(task);
break;
}
shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
shm_clist);
/*
* 1) Get a pointer to the ipc namespace. This pointer is
* guaranteed to be valid because shp's lifetime is always
* shorter than the lifetime of the namespace it lives in.
* Since we hold task_lock, shp won't be freed under us.
*/
ns = shp->ns;
/*
* 2) If kernel.shm_rmid_forced is not set then only keep track of
* which shmids are orphaned, so that a later set of the sysctl
* can clean them up.
*/
if (!ns->shm_rmid_forced)
goto unlink_continue;
/*
* 3) get a reference to the namespace.
* The refcount could be already 0. If it is 0, then
* the shm objects will be freed by free_ipc_work().
*/
ns = get_ipc_ns_not_zero(ns);
if (!ns) {
unlink_continue:
list_del_init(&shp->shm_clist);
task_unlock(task);
continue;
}

/*
* 4) get a reference to shp.
* This cannot fail: shm_clist_rm() is called before
* ipc_rmid(), thus the refcount cannot be 0.
*/
WARN_ON(!ipc_rcu_getref(&shp->shm_perm));
/*
* 5) unlink the shm segment from the list of segments
* created by current.
* This must be done last. After unlinking,
* only the refcounts obtained above prevent IPC_RMID
* from destroying the segment or the namespace.
*/
list_del_init(&shp->shm_clist);
task_unlock(task);
/*
* 6) We have all references.
* Thus lock shp and, if needed, destroy it.
*/
down_write(&shm_ids(ns).rwsem);
shm_lock_by_ptr(shp);
/*
* rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
* safe to call ipc_rcu_putref here
*/
ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
if (ipc_valid_object(&shp->shm_perm)) {
if (shm_may_destroy(shp))
shm_destroy(ns, shp);
else
shm_unlock(shp);
} else {
/*
* Someone else deleted the shp from namespace
* idr/kht while we have waited.
* Just unlock and continue.
*/
shm_unlock(shp);
}
up_write(&shm_ids(ns).rwsem);
put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
}
}
static int shm_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);

return sfd->vm_ops->fault(vmf);
}
static int shm_split(struct vm_area_struct *vma, unsigned long addr)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
if (sfd->vm_ops && sfd->vm_ops->split)
return sfd->vm_ops->split(vma, addr);
return 0;
}
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
int err = 0;
if (sfd->vm_ops->set_policy)
err = sfd->vm_ops->set_policy(vma, new);
return err;
}
static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct mempolicy *pol = NULL;
if (sfd->vm_ops->get_policy)
pol = sfd->vm_ops->get_policy(vma, addr);
else if (vma->vm_policy)
pol = vma->vm_policy;
return pol;
}
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
struct shm_file_data *sfd = shm_file_data(file);
int ret;

/*
* In case of remap_file_pages() emulation, the file can represent an
* IPC ID that was removed, and possibly even reused by another shm
* segment already. Propagate this case as an error to caller.
*/
ret = __shm_open(vma);
if (ret)
return ret;
ret = call_mmap(sfd->file, vma);
if (ret) {
shm_close(vma);
return ret;
}
sfd->vm_ops = vma->vm_ops;
WARN_ON(!sfd->vm_ops->fault);
vma->vm_ops = &shm_vm_ops;
return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
struct shm_file_data *sfd = shm_file_data(file);
put_ipc_ns(sfd->ns);
fput(sfd->file);
shm_file_data(file) = NULL;
kfree(sfd);
return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
struct shm_file_data *sfd = shm_file_data(file);
if (!sfd->file->f_op->fsync)
return -EINVAL;
return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct shm_file_data *sfd = shm_file_data(file);
if (!sfd->file->f_op->fallocate)
return -EOPNOTSUPP;
return sfd->file->f_op->fallocate(file, mode, offset, len);
}
static unsigned long shm_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct shm_file_data *sfd = shm_file_data(file);
return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
pgoff, flags);
}

static const struct file_operations shm_file_operations = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
.llseek = noop_llseek,
.fallocate = shm_fallocate,
};
/*
* shm_file_operations_huge is now identical to shm_file_operations,
* but we keep it distinct for the sake of is_file_shm_hugepages().
*/
static const struct file_operations shm_file_operations_huge = {
.mmap = shm_mmap,
.fsync = shm_fsync,
.release = shm_release,
.get_unmapped_area = shm_get_unmapped_area,
.llseek = noop_llseek,
.fallocate = shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
return file->f_op == &shm_file_operations_huge;
}
static const struct vm_operations_struct shm_vm_ops = {
.open = shm_open, /* callback for a new vm-area open */
.close = shm_close, /* callback for when the vm-area is released */
.fault = shm_fault,
.split = shm_split,
#if defined(CONFIG_NUMA)
.set_policy = shm_set_policy,
.get_policy = shm_get_policy,
#endif
};

/**
* newseg - Create a new shared memory segment
* @ns: namespace
* @params: ptr to the structure that contains key, size and shmflg
*
* Called with shm_ids.rwsem held as a writer.
*/
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
key_t key = params->key;
int shmflg = params->flg;
size_t size = params->u.size;
int error;
struct shmid_kernel *shp;
size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct file *file;
char name[13];
vm_flags_t acctflag = 0;

if (size < SHMMIN || size > ns->shm_ctlmax)
return -EINVAL;

if (numpages << PAGE_SHIFT < size)
return -ENOSPC;
if (ns->shm_tot + numpages < ns->shm_tot ||
ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;

shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
if (unlikely(!shp))
return -ENOMEM;

shp->shm_perm.key = key;
shp->shm_perm.mode = (shmflg & S_IRWXUGO);
shp->mlock_user = NULL;
shp->shm_perm.security = NULL;
error = security_shm_alloc(shp);
if (error) {
kvfree(shp);
return error;
}

sprintf(name, "SYSV%08x", key);
if (shmflg & SHM_HUGETLB) {
struct hstate *hs;
size_t hugesize;

hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
if (!hs) {
error = -EINVAL;
goto no_file;
}
hugesize = ALIGN(size, huge_page_size(hs));
/* hugetlb_file_setup applies strict accounting */
if (shmflg & SHM_NORESERVE)
acctflag = VM_NORESERVE;
file = hugetlb_file_setup(name, hugesize, acctflag,
&shp->mlock_user, HUGETLB_SHMFS_INODE,
(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
} else {
/*
* Do not allow no accounting for OVERCOMMIT_NEVER, even
* if it's asked for.
*/
if ((shmflg & SHM_NORESERVE) &&
sysctl_overcommit_memory != OVERCOMMIT_NEVER)
acctflag = VM_NORESERVE;
file = shmem_kernel_file_setup(name, size, acctflag);
}
error = PTR_ERR(file);
if (IS_ERR(file))
goto no_file;
shp->shm_cprid = task_tgid_vnr(current);
shp->shm_lprid = 0;
shp->shm_atim = shp->shm_dtim = 0;
shp->shm_ctim = ktime_get_real_seconds();
shp->shm_segsz = size;
shp->shm_nattch = 0;
shp->shm_file = file;
shp->shm_creator = current;

/* ipc_addid() locks shp upon success. */
error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
if (error < 0)
goto no_id;
shp->ns = ns;
task_lock(current);
list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
task_unlock(current);
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
*/
file_inode(file)->i_ino = shp->shm_perm.id;

ns->shm_tot += numpages;
error = shp->shm_perm.id;

ipc_unlock_object(&shp->shm_perm);
rcu_read_unlock();
return error;

no_id:
if (is_file_hugepages(file) && shp->mlock_user)
user_shm_unlock(size, shp->mlock_user);
fput(file);
no_file:
call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
return error;
}

/*
* Called with shm_ids.rwsem and ipcp locked.
*/
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
struct shmid_kernel *shp;

shp = container_of(ipcp, struct shmid_kernel, shm_perm);
return security_shm_associate(shp, shmflg);
}

/*
* Called with shm_ids.rwsem and ipcp locked.
*/
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
struct ipc_params *params)
{
struct shmid_kernel *shp;

shp = container_of(ipcp, struct shmid_kernel, shm_perm);
if (shp->shm_segsz < params->u.size)
return -EINVAL;

return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
struct ipc_namespace *ns;
static const struct ipc_ops shm_ops = {
.getnew = newseg,
.associate = shm_security,
.more_checks = shm_more_checks,
};
struct ipc_params shm_params;

ns = current->nsproxy->ipc_ns;

shm_params.key = key;
shm_params.flg = shmflg;
shm_params.u.size = size;
return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
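/*
* Illustrative userspace usage of shmget() (not from this file); the key
* value is arbitrary:
*
*	int id = shmget(0x1234, 1 << 20, IPC_CREAT | 0600);
*	void *p = shmat(id, NULL, 0);	// attach
*	...
*	shmdt(p);			// detach
*
* Calling shmget() again with the same key but a size larger than the
* existing segment fails shm_more_checks() above with -EINVAL.
*/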
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
switch (version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct shmid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
out.shm_segsz = in->shm_segsz;
out.shm_atime = in->shm_atime;
out.shm_dtime = in->shm_dtime;
out.shm_ctime = in->shm_ctime;
out.shm_cpid = in->shm_cpid;
out.shm_lpid = in->shm_lpid;
out.shm_nattch = in->shm_nattch;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
switch (version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct shmid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->shm_perm.uid = tbuf_old.shm_perm.uid;
out->shm_perm.gid = tbuf_old.shm_perm.gid;
out->shm_perm.mode = tbuf_old.shm_perm.mode;
return 0;
}
default:
return -EINVAL;
}
}
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
switch (version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct shminfo out;
if (in->shmmax > INT_MAX)
out.shmmax = INT_MAX;
else
out.shmmax = (int)in->shmmax;
out.shmmin = in->shmmin;
out.shmmni = in->shmmni;
out.shmseg = in->shmseg;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
/*
* Calculate and add used RSS and swap pages of a shm.
* Called with shm_ids.rwsem held as a reader
*/
static void shm_add_rss_swap(struct shmid_kernel *shp,
unsigned long *rss_add, unsigned long *swp_add)
{
struct inode *inode;

inode = file_inode(shp->shm_file);

if (is_file_hugepages(shp->shm_file)) {
struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_file(shp->shm_file);
*rss_add += pages_per_huge_page(h) * mapping->nrpages;
} else {
#ifdef CONFIG_SHMEM
struct shmem_inode_info *info = SHMEM_I(inode);
spin_lock_irq(&info->lock);
*rss_add += inode->i_mapping->nrpages;
*swp_add += info->swapped;
spin_unlock_irq(&info->lock);
#else
*rss_add += inode->i_mapping->nrpages;
#endif
}
}
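/*
* Worked example (illustrative): for a hugetlb-backed segment using
* 2 MiB huge pages on a 4 KiB base-page kernel, pages_per_huge_page()
* is 512; with mapping->nrpages == 3 the segment adds 3 * 512 = 1536
* base pages to *rss_add, and nothing to *swp_add (hugetlb pages are
* not swapped).
*/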
/*
* Called with shm_ids.rwsem held as a reader
*/
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
unsigned long *swp)
{
int next_id;
int total, in_use;

*rss = 0;
*swp = 0;

in_use = shm_ids(ns).in_use;

for (total = 0, next_id = 0; total < in_use; next_id++) {
struct kern_ipc_perm *ipc;
struct shmid_kernel *shp;

ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
if (ipc == NULL)
continue;
shp = container_of(ipc, struct shmid_kernel, shm_perm);

shm_add_rss_swap(shp, rss, swp);

total++;
}
}

/*
* This function handles some shmctl commands which require the rwsem
* to be held in write mode.
* NOTE: no locks must be held, the rwsem is taken inside this function.
*/
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
struct shmid64_ds *shmid64)
{
struct kern_ipc_perm *ipcp;
struct shmid_kernel *shp;
int err;
down_write(&shm_ids(ns).rwsem);
rcu_read_lock();

ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
&shmid64->shm_perm, 0);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_unlock1;
}
shp = container_of(ipcp, struct shmid_kernel, shm_perm);
err = security_shm_shmctl(shp, cmd);
if (err)
goto out_unlock1;

switch (cmd) {
case IPC_RMID:
ipc_lock_object(&shp->shm_perm);
/* do_shm_rmid unlocks the ipc object and rcu */
do_shm_rmid(ns, ipcp);
goto out_up;
case IPC_SET:
ipc_lock_object(&shp->shm_perm);
err = ipc_update_perm(&shmid64->shm_perm, ipcp);
if (err)
goto out_unlock0;
shp->shm_ctim = ktime_get_real_seconds();
break;
default:
err = -EINVAL;
goto out_unlock1;
}

out_unlock0:
ipc_unlock_object(&shp->shm_perm);
out_unlock1:
rcu_read_unlock();
out_up:
up_write(&shm_ids(ns).rwsem);
return err;
}

static int shmctl_ipc_info(struct ipc_namespace *ns,
struct shminfo64 *shminfo)
{
int err = security_shm_shmctl(NULL, IPC_INFO);
if (!err) {
memset(shminfo, 0, sizeof(*shminfo));
shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
shminfo->shmmax = ns->shm_ctlmax;
shminfo->shmall = ns->shm_ctlall;
shminfo->shmmin = SHMMIN;
down_read(&shm_ids(ns).rwsem);
err = ipc_get_maxid(&shm_ids(ns));
up_read(&shm_ids(ns).rwsem);
if (err < 0)
err = 0;
}
return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
struct shm_info *shm_info)
{
int err = security_shm_shmctl(NULL, SHM_INFO);
if (!err) {
memset(shm_info, 0, sizeof(*shm_info));