Diffstat (limited to 'mm/mempolicy.c'):
 mm/mempolicy.c | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1511786462f3..871d9584d9cf 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -67,6 +67,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/nodemask.h>
+#include <linux/cpuset.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/string.h>
@@ -167,6 +168,10 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 	if (copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
 		return -EFAULT;
 	nodes[nlongs-1] &= endmask;
+	/* Update current mems_allowed */
+	cpuset_update_current_mems_allowed();
+	/* Ignore nodes not set in current->mems_allowed */
+	cpuset_restrict_to_mems_allowed(nodes);
 	return mpol_check_policy(mode, nodes);
 }
 
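
cpuset_restrict_to_mems_allowed() is provided by the cpuset side of this work (kernel/cpuset.c) and is not part of this diff. A minimal sketch of what it is expected to do, assuming current->mems_allowed is a nodemask_t and the usual <linux/bitmap.h>/<linux/nodemask.h> helpers:

	/* Sketch only: clear any node the task's cpuset does not allow. */
	void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
	{
		bitmap_and(nodes, nodes,
			   nodes_addr(current->mems_allowed), MAX_NUMNODES);
	}

With that, get_nodes() silently drops nodes outside the caller's cpuset before mpol_check_policy() validates the resulting mask.
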
@@ -655,8 +660,10 @@ static struct zonelist *zonelist_policy(unsigned gfp, struct mempolicy *policy)
 		break;
 	case MPOL_BIND:
 		/* Lower zones don't get a policy applied */
+		/* Careful: current->mems_allowed might have moved */
 		if (gfp >= policy_zone)
-			return policy->v.zonelist;
+			if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
+				return policy->v.zonelist;
 		/*FALL THROUGH*/
 	case MPOL_INTERLEAVE: /* should not happen */
 	case MPOL_DEFAULT:
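
cpuset_zonelist_valid_mems_allowed() is likewise defined outside this file; it only has to decide whether the precomputed MPOL_BIND zonelist still intersects the task's cpuset. A plausible sketch, assuming the 2.6-era zonelist layout (NULL-terminated zl->zones[], zone->zone_pgdat->node_id) and a nodemask_t mems_allowed:

	/* Sketch only: the zonelist is usable if at least one of its zones
	 * sits on a node the task is currently allowed to allocate from. */
	int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
	{
		int i;

		for (i = 0; zl->zones[i]; i++) {
			int nid = zl->zones[i]->zone_pgdat->node_id;

			if (node_isset(nid, current->mems_allowed))
				return 1;
		}
		return 0;
	}

If the bound zonelist no longer intersects mems_allowed (the cpuset "might have moved"), the code now falls through to the MPOL_DEFAULT handling below instead of returning an unusable zonelist.
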
@@ -747,6 +754,8 @@ alloc_page_vma(unsigned gfp, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(vma, addr);
 
+	cpuset_update_current_mems_allowed();
+
 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
 		unsigned nid;
 		if (vma) {
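
cpuset_update_current_mems_allowed() is called in three places in this patch; it refreshes the task's cached current->mems_allowed from its cpuset before the mask is consulted. The following is an illustrative sketch only, not taken from this diff: the task->cpuset back-pointer, the generation counters and guarantee_online_mems() are assumptions about the cpuset side of the series:

	/* Sketch only: re-derive mems_allowed when the cpuset has changed. */
	void cpuset_update_current_mems_allowed(void)
	{
		struct cpuset *cs = current->cpuset;	/* assumed back-pointer */

		if (!cs)
			return;		/* task is exiting */
		if (current->cpuset_mems_generation != cs->mems_generation) {
			guarantee_online_mems(cs, &current->mems_allowed);
			current->cpuset_mems_generation = cs->mems_generation;
		}
	}

Doing the refresh at the top of alloc_page_vma() means the policy checks further down run against an up-to-date mask even if the cpuset's memory placement was rewritten since the task last allocated.
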
@@ -784,6 +793,8 @@ struct page *alloc_pages_current(unsigned gfp, unsigned order)
 {
 	struct mempolicy *pol = current->mempolicy;
 
+	if (!in_interrupt())
+		cpuset_update_current_mems_allowed();
 	if (!pol || in_interrupt())
 		pol = &default_policy;
 	if (pol->policy == MPOL_INTERLEAVE)
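
The !in_interrupt() guard matters because the refresh consults the task's cpuset, which is only meaningful in process context; interrupt-time allocations already fall back to default_policy on the following line. Roughly how alloc_pages_current() reads with this hunk applied, where the two trailing return statements are assumed from the surrounding 2.6-era code and are not shown in this diff:

	struct page *alloc_pages_current(unsigned gfp, unsigned order)
	{
		struct mempolicy *pol = current->mempolicy;

		if (!in_interrupt())
			cpuset_update_current_mems_allowed();
		if (!pol || in_interrupt())
			pol = &default_policy;
		if (pol->policy == MPOL_INTERLEAVE)
			return alloc_page_interleave(gfp, order, interleave_nodes(pol));
		return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
	}
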