author    Linus Torvalds <torvalds@linux-foundation.org>  2021-03-25 11:43:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-03-25 11:43:43 -0700
commit    002322402dafd846c424ffa9240a937f49b48c42 (patch)
tree      73382f24d0dc7dfc2dfb4347ff978bad3712859e /include
parent    2ba9bea2d3682361f0f22f68a400bcee4248c205 (diff)
parent    d3e2ff28ca27352d43a41053263d620202324c1f (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "14 patches.

  Subsystems affected by this patch series: mm (hugetlb, kasan, gup,
  selftests, z3fold, kfence, memblock, and highmem), squashfs, ia64,
  gcov, and mailmap"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mailmap: update Andrey Konovalov's email address
  mm/highmem: fix CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP
  mm: memblock: fix section mismatch warning again
  kfence: make compatible with kmemleak
  gcov: fix clang-11+ support
  ia64: fix format strings for err_inject
  ia64: mca: allocate early mca with GFP_ATOMIC
  squashfs: fix xattr id and id lookup sanity checks
  squashfs: fix inode lookup sanity checks
  z3fold: prevent reclaim/free race for headless pages
  selftests/vm: fix out-of-tree build
  mm/mmu_notifiers: ensure range_end() is paired with range_start()
  kasan: fix per-page tags for non-page_alloc pages
  hugetlb_cgroup: fix imbalanced css_get and css_put pair for shared mappings
Diffstat (limited to 'include')
-rw-r--r--  include/linux/hugetlb_cgroup.h  | 15
-rw-r--r--  include/linux/memblock.h        |  4
-rw-r--r--  include/linux/mm.h              | 18
-rw-r--r--  include/linux/mmu_notifier.h    | 10
4 files changed, 35 insertions, 12 deletions
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 2ad6e92f124a..0bff345c4bc6 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+ css_put(&h_cg->css);
+}
+
extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr);
extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg,
- unsigned long nr_pages);
+ unsigned long nr_pages,
+ bool region_del);
extern void hugetlb_cgroup_file_init(void) __init;
extern void hugetlb_cgroup_migrate(struct page *oldhpage,
@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
#else
static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
struct file_region *rg,
- unsigned long nr_pages)
+ unsigned long nr_pages,
+ bool region_del)
{
}
@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
return true;
}
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
struct hugetlb_cgroup **ptr)
{
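The hugetlb_cgroup hunks above come from the css_get/css_put imbalance fix in the merge list: the new hugetlb_cgroup_put_rsvd_cgroup() helper drops a reserved-page cgroup reference, and hugetlb_cgroup_uncharge_file_region() learns whether the region is actually being deleted. A minimal standalone sketch of the pairing discipline involved (illustrative only; ref, region, and the helpers below are hypothetical stand-ins, not kernel API):

#include <assert.h>
#include <stddef.h>

struct ref { int count; };                 /* stand-in for a cgroup css */

static void ref_get(struct ref *r) { r->count++; }  /* css_get() analogue */
static void ref_put(struct ref *r) { r->count--; }  /* css_put() analogue */

struct region { struct ref *owner; };      /* stand-in for struct file_region */

static void region_charge(struct region *rg, struct ref *owner)
{
	rg->owner = owner;
	ref_get(owner);                    /* reference pinned at charge time */
}

static void region_uncharge(struct region *rg, int region_del)
{
	/* Only a true deletion releases the pinned reference; shrinking or
	 * splitting a region must not, or the count would fall below the
	 * number of charges still outstanding. */
	if (region_del && rg->owner) {
		ref_put(rg->owner);
		rg->owner = NULL;
	}
}

int main(void)
{
	struct ref cg = { .count = 0 };
	struct region rg = { NULL };

	region_charge(&rg, &cg);
	region_uncharge(&rg, 1);
	assert(cg.count == 0);             /* balanced: no leaked reference */
	return 0;
}

The region_del flag mirrors the bool the patch threads through the uncharge path: the same uncharge helper is reached on both resize and delete, and only the latter may drop the reference.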
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index d13e3cd938b4..5984fff3f175 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
/*
* Set the allocation direction to bottom-up or top-down.
*/
-static inline __init void memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
memblock.bottom_up = enable;
}
@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
* if this is true, that said, memblock will allocate memory
* in bottom-up direction.
*/
-static inline __init bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
{
return memblock.bottom_up;
}
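The memblock change swaps __init for __init_memblock on the two inline helpers so that configurations keeping memblock alive after boot do not end up referencing freed init text. A simplified sketch of the mechanism, not the verbatim kernel macros (the exact expansion is an assumption here; the real definitions live in include/linux/memblock.h and key off CONFIG_ARCH_KEEP_MEMBLOCK):

/*
 * Functions tagged with an init-section attribute are placed in memory
 * that the kernel frees once boot finishes, so modpost warns about any
 * reference to them from code that can run later ("section mismatch").
 * __init_memblock only degrades to an init-time section when the
 * architecture really discards memblock after boot.
 */
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock                /* memblock kept: callable any time */
#else
#define __init_memblock __meminit      /* discarded with other init code */
#endif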
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 64a71bf20536..8ba434287387 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1461,16 +1461,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This makes it possible
+ * to avoid setting tags for all pages to the native kernel tag value 0xff,
+ * as the default value 0x00 then maps to 0xff.
+ */
+
static inline u8 page_kasan_tag(const struct page *page)
{
- if (kasan_enabled())
- return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
- return 0xff;
+ u8 tag = 0xff;
+
+ if (kasan_enabled()) {
+ tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+ tag ^= 0xff;
+ }
+
+ return tag;
}
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
if (kasan_enabled()) {
+ tag ^= 0xff;
page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
}
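The encoding this hunk introduces can be checked in isolation. The following standalone program is a sketch, not kernel code (KASAN_TAG_MASK is reused by name, but decode_tag/encode_tag are hypothetical mirrors of the two accessors above); it shows why storing tags xor'ed with 0xff lets freshly zeroed page flags decode to the native tag 0xff, so pages that never pass through page_alloc need no explicit tagging:

#include <stdio.h>
#include <stdint.h>

#define KASAN_TAG_MASK 0xffu

/* Mirror of page_kasan_tag(): stored bits are xor'ed back on read. */
static uint8_t decode_tag(uint8_t stored)
{
	return (stored & KASAN_TAG_MASK) ^ 0xff;
}

/* Mirror of page_kasan_tag_set(): tags are xor'ed before being stored. */
static uint8_t encode_tag(uint8_t tag)
{
	return (tag ^ 0xff) & KASAN_TAG_MASK;
}

int main(void)
{
	/* A page whose flag bits were never set reads back as the native
	 * kernel tag, which is exactly what the patch relies on. */
	printf("default 0x00 decodes to 0x%02x\n", decode_tag(0x00)); /* 0xff */

	/* Any explicitly set tag still round-trips unchanged. */
	printf("tag 0x2a round-trips to 0x%02x\n",
	       decode_tag(encode_tag(0x2a)));                         /* 0x2a */
	return 0;
}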
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index b8200782dede..1a6a9eb6d3fa 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
* the last refcount is dropped.
*
* If blockable argument is set to false then the callback cannot
- * sleep and has to return with -EAGAIN. 0 should be returned
- * otherwise. Please note that if invalidate_range_start approves
- * a non-blocking behavior then the same applies to
- * invalidate_range_end.
- *
+ * sleep and has to return with -EAGAIN if sleeping would be required.
+ * 0 should be returned otherwise. Please note that notifiers that can
+ * fail invalidate_range_start are not allowed to implement
+ * invalidate_range_end, as there is no mechanism for informing the
+ * notifier that its start failed.
*/
int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
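The reworded comment encodes a caller-side invariant: once invalidate_range_start() fails with -EAGAIN, the core has no way to tell that notifier to skip its end callback, so notifiers whose start can fail simply must not implement invalidate_range_end(). A hedged sketch of that control flow (the type and functions below are hypothetical stand-ins, not the mmu_notifier API):

#include <errno.h>
#include <stdbool.h>

struct notifier;   /* hypothetical stand-in for struct mmu_notifier */

/* May return -EAGAIN when blockable is false and sleeping is required. */
int range_start(struct notifier *n, bool blockable);
void range_end(struct notifier *n);

static int invalidate(struct notifier *n, bool blockable)
{
	int ret = range_start(n, blockable);

	if (ret)
		return ret;    /* start failed: range_end() must never run,
				* and the notifier has no way to learn that
				* its start was effectively skipped */

	/* ... perform the actual invalidation here ... */

	range_end(n);          /* paired only with a successful start */
	return 0;
}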