From ea0c9d768bade0bc87046841a3e667f477d90668 Mon Sep 17 00:00:00 2001
From: Oleksandr Natalenko
Date: Mon, 6 May 2019 22:02:49 +0200
Subject: [PATCH] uksm-5.1: apply 52d1e606ee733

Signed-off-by: Oleksandr Natalenko
---
 mm/uksm.c | 36 +++++++++++++++++++++++++++++++-----
 1 file changed, 31 insertions(+), 5 deletions(-)

diff --git a/mm/uksm.c b/mm/uksm.c
index 174e1921eade..2531599038b8 100644
--- a/mm/uksm.c
+++ b/mm/uksm.c
@@ -875,8 +875,8 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node,
  * to keep the page_count protocol described with page_cache_get_speculative.
  *
  * Note: it is possible that get_uksm_page() will return NULL one moment,
- * then page the next, if the page is in between page_freeze_refs() and
- * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page
+ * then page the next, if the page is in between page_ref_freeze() and
+ * page_ref_unfreeze(): this shouldn't be a problem anywhere, the page
  * is on its way to being freed; but it is an anomaly to bear in mind.
  *
  * @unlink_rb: if the removal of this node will firstly unlink from
@@ -914,10 +914,11 @@ static struct page *get_uksm_page(struct stable_node *stable_node,
 	 * We cannot do anything with the page while its refcount is 0.
 	 * Usually 0 means free, or tail of a higher-order page: in which
 	 * case this node is no longer referenced, and should be freed;
-	 * however, it might mean that the page is under page_freeze_refs().
+	 * however, it might mean that the page is under page_ref_freeze().
 	 * The __remove_mapping() case is easy, again the node is now stale;
-	 * but if page is swapcache in migrate_page_move_mapping(), it might
-	 * still be our page, in which case it's essential to keep the node.
+	 * the same is in reuse_ksm_page() case; but if page is swapcache
+	 * in migrate_page_move_mapping(), it might still be our page,
+	 * in which case it's essential to keep the node.
 	 */
 	while (!get_page_unless_zero(page)) {
 		/*
@@ -4749,6 +4750,31 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 		goto again;
 }
 
+bool reuse_ksm_page(struct page *page,
+			struct vm_area_struct *vma,
+			unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+			WARN_ON(!page_mapped(page)) ||
+			WARN_ON(!PageLocked(page))) {
+		dump_page(page, "reuse_ksm_page");
+		return false;
+	}
+#endif
+
+	if (PageSwapCache(page) || !page_stable_node(page))
+		return false;
+	/* Prohibit parallel get_ksm_page() */
+	if (!page_ref_freeze(page, 1))
+		return false;
+
+	page_move_anon_rmap(page, vma);
+	page->index = linear_page_index(vma, address);
+	page_ref_unfreeze(page, 1);
+
+	return true;
+}
 #ifdef CONFIG_MIGRATION
 /* Common ksm interface but may be specific to uksm */
 void ksm_migrate_page(struct page *newpage, struct page *oldpage)
-- 
2.21.0.777.g83232e3864
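
Note (not part of the patch itself): 52d1e606ee733 is the upstream "reuse only-pte-mapped KSM page in do_wp_page()" change, which introduces reuse_ksm_page() so the write-protect fault path can keep a KSM page that is mapped by a single PTE instead of breaking COW and copying it. The fragment below is a rough sketch of what the caller side in do_wp_page() (mm/memory.c) looks like after that upstream commit; it is illustrative only, is not added by this diff, and details such as the surrounding trylock_page() handling and the exact fallback label may differ in the 5.1 tree.

	if (PageKsm(vmf->page)) {
		/* Try to keep the merged page in place for this write fault. */
		bool reused = reuse_ksm_page(vmf->page, vmf->vma,
					     vmf->address);
		unlock_page(vmf->page);
		if (!reused)
			goto copy;	/* fall back to copying the page */
		wp_page_reuse(vmf);
		return VM_FAULT_WRITE;
	}

The safety argument mirrors the "Prohibit parallel get_ksm_page()" comment in the hunk above: page_ref_freeze(page, 1) only succeeds while the caller holds the sole reference, so a concurrent lookup cannot grab the page while page_move_anon_rmap() and the page->index update rewrite its anon mapping.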