Diffstat (limited to 'include/linux/mmzone.h')
 include/linux/mmzone.h | 40
 1 file changed, 35 insertions(+), 5 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5c76737d836b..ae19af5ec02c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -474,10 +474,16 @@ struct zone {
* frequently read in proximity to zone->lock. It's good to
* give them a chance of being in the same cacheline.
*
- * Write access to present_pages and managed_pages at runtime should
- * be protected by lock_memory_hotplug()/unlock_memory_hotplug().
- * Any reader who can't tolerant drift of present_pages and
- * managed_pages should hold memory hotplug lock to get a stable value.
+ * Write access to present_pages at runtime should be protected by
+ * lock_memory_hotplug()/unlock_memory_hotplug(). Any reader who can't
+ * tolerate drift of present_pages should hold the memory hotplug lock
+ * to get a stable value.
+ *
+ * Read access to managed_pages should be safe because it's an unsigned
+ * long. Write access to zone->managed_pages and totalram_pages is
+ * protected by managed_page_count_lock at runtime. Ideally only
+ * adjust_managed_page_count() should be used instead of directly
+ * touching zone->managed_pages and totalram_pages.
*/
unsigned long spanned_pages;
unsigned long present_pages;
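
The new comment steers writers toward adjust_managed_page_count() instead of open-coding updates to zone->managed_pages and totalram_pages. As a minimal sketch of what such a helper looks like, assuming managed_page_count_lock is a spinlock defined next to the page allocator (the upstream version in mm/page_alloc.c may differ in detail):

/* Serialize writers of zone->managed_pages and totalram_pages. */
static DEFINE_SPINLOCK(managed_page_count_lock);

void adjust_managed_page_count(struct page *page, long count)
{
	spin_lock(&managed_page_count_lock);
	page_zone(page)->managed_pages += count;
	totalram_pages += count;
	spin_unlock(&managed_page_count_lock);
}

Readers such as the watermark code can keep loading managed_pages without the lock, which is why the comment only calls the unsigned long reads "safe" rather than exact.
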
@@ -495,6 +501,13 @@ typedef enum {
ZONE_CONGESTED, /* zone has many dirty pages backed by
* a congested BDI
*/
+ ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found
+ * many dirty file pages at the tail
+ * of the LRU.
+ */
+ ZONE_WRITEBACK, /* reclaim scanning has recently found
+ * many pages under writeback
+ */
} zone_flags_t;
static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
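
ZONE_TAIL_LRU_DIRTY and ZONE_WRITEBACK are intended to be set by the reclaim scanner when an entire batch of pages taken off the tail of the inactive LRU turns out to be dirty or under writeback. A hedged sketch of the setting side as it might appear in mm/vmscan.c; the counters nr_taken, nr_unqueued_dirty and nr_writeback are illustrative names, not necessarily the upstream ones:

	/* Illustrative: the whole batch was dirty or under writeback. */
	if (nr_unqueued_dirty == nr_taken)
		zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
	if (nr_writeback == nr_taken)
		zone_set_flag(zone, ZONE_WRITEBACK);

kswapd would then typically clear the flags once the zone is balanced again, so they describe recent reclaim pressure rather than a permanent property of the zone.
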
@@ -517,6 +530,16 @@ static inline int zone_is_reclaim_congested(const struct zone *zone)
return test_bit(ZONE_CONGESTED, &zone->flags);
}
+static inline int zone_is_reclaim_dirty(const struct zone *zone)
+{
+ return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
+}
+
+static inline int zone_is_reclaim_writeback(const struct zone *zone)
+{
+ return test_bit(ZONE_WRITEBACK, &zone->flags);
+}
+
static inline int zone_is_reclaim_locked(const struct zone *zone)
{
return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
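
On the read side, the new predicates let reclaim adjust its behaviour per zone. A sketch under the assumption that only kswapd should issue writeback or throttle; reclaim_may_writepage() and reclaim_should_throttle() are hypothetical helper names used purely for illustration:

/* Hypothetical helpers showing how the predicates might be consumed. */
static bool reclaim_may_writepage(struct zone *zone)
{
	/* Only kswapd writes dirty file pages, and only once the tail
	 * of the LRU has been observed to be dirty.
	 */
	return current_is_kswapd() && zone_is_reclaim_dirty(zone);
}

static bool reclaim_should_throttle(struct zone *zone)
{
	/* Back off when reclaim keeps meeting pages under writeback. */
	return current_is_kswapd() && zone_is_reclaim_writeback(zone);
}
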
@@ -716,7 +739,10 @@ typedef struct pglist_data {
* or node_spanned_pages stay constant. Holding this will also
* guarantee that any pfn_valid() stays that way.
*
- * Nests above zone->lock and zone->size_seqlock.
+ * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
+ * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
+ *
+ * Nests above zone->lock and zone->span_seqlock.
*/
spinlock_t node_size_lock;
#endif
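
The rewritten comment points at pgdat_resize_lock()/pgdat_resize_unlock(). A minimal sketch of those wrappers, following the usual pattern in include/linux/memory_hotplug.h, where they collapse to no-ops when memory hotplug is not configured:

#ifdef CONFIG_MEMORY_HOTPLUG
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
#else
/* No runtime resizing: callers still compile, the locking vanishes. */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) {}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) {}
#endif
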
@@ -1111,6 +1137,10 @@ struct mem_section {
struct page_cgroup *page_cgroup;
unsigned long pad;
#endif
+ /*
+ * WARNING: mem_section must be a power-of-2 in size for the
+ * calculation and use of SECTION_ROOT_MASK to make sense.
+ */
};
#ifdef CONFIG_SPARSEMEM_EXTREME
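
The power-of-2 warning exists because the sparse-memory code splits a section number into a root index and an offset with a mask. A sketch of that arithmetic for the SPARSEMEM_EXTREME case, reproduced from memory, so treat the exact expressions as approximate:

/* Each root holds one page worth of mem_section entries. */
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	/* "nr & SECTION_ROOT_MASK" equals "nr % SECTIONS_PER_ROOT" only
	 * when SECTIONS_PER_ROOT is a power of two, i.e. only when
	 * sizeof(struct mem_section) is a power of two.
	 */
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
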