
TOMOYO Linux Cross Reference
Linux/kernel/livepatch/patch.c


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

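/*
 * Find the klp_ops that currently covers @old_func.  Checking only the
 * first entry on each func_stack is enough because every function on a
 * given stack patches the same original function.  Returns NULL if no
 * ftrace ops has been set up for @old_func yet.
 */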
struct klp_ops *klp_find_ops(void *old_func)
{
        struct klp_ops *ops;
        struct klp_func *func;

        list_for_each_entry(ops, &klp_ops, node) {
                func = list_first_entry(&ops->func_stack, struct klp_func,
                                        stack_node);
                if (func->old_func == old_func)
                        return ops;
        }

        return NULL;
}

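/*
 * The ftrace handler shared by all patched functions.  It redirects the
 * intercepted call to the most recently patched variant on the ops'
 * func_stack, honouring the per-task patch state while a transition is
 * in progress.
 */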
static void notrace klp_ftrace_handler(unsigned long ip,
                                       unsigned long parent_ip,
                                       struct ftrace_ops *fops,
                                       struct pt_regs *regs)
{
        struct klp_ops *ops;
        struct klp_func *func;
        int patch_state;

        ops = container_of(fops, struct klp_ops, fops);

        /*
         * A variant of synchronize_rcu() is used to allow patching functions
         * where RCU is not watching, see klp_synchronize_transition().
         */
        preempt_disable_notrace();

        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);

        /*
         * func should never be NULL because preemption should be disabled here
         * and unregister_ftrace_function() does the equivalent of a
         * synchronize_rcu() before the func_stack removal.
         */
        if (WARN_ON_ONCE(!func))
                goto unlock;

        /*
         * In the enable path, enforce the order of the ops->func_stack and
         * func->transition reads.  The corresponding write barrier is in
         * __klp_enable_patch().
         *
         * (Note that this barrier technically isn't needed in the disable
         * path.  In the rare case where klp_update_patch_state() runs before
         * this handler, its TIF_PATCH_PENDING read and this func->transition
         * read need to be ordered.  But klp_update_patch_state() already
         * enforces that.)
         */
        smp_rmb();

        if (unlikely(func->transition)) {

                /*
                 * Enforce the order of the func->transition and
                 * current->patch_state reads.  Otherwise we could read an
                 * out-of-date task state and pick the wrong function.  The
                 * corresponding write barrier is in klp_init_transition().
                 */
                smp_rmb();

                patch_state = current->patch_state;

                WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

                if (patch_state == KLP_UNPATCHED) {
                        /*
                         * Use the previously patched version of the function.
                         * If no previous patches exist, continue with the
                         * original function.
                         */
                        func = list_entry_rcu(func->stack_node.next,
                                              struct klp_func, stack_node);

                        if (&func->stack_node == &ops->func_stack)
                                goto unlock;
                }
        }

        /*
         * NOPs are used to replace existing patches with original code.
         * Do nothing! Setting pc would cause an infinite loop.
         */
        if (func->nop)
                goto unlock;

        klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
        preempt_enable_notrace();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
        return faddr;
}
#endif

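/*
 * Remove @func from its ops' func_stack.  If it was the only entry, the
 * ftrace ops is unregistered, its filter is cleared and the ops is freed;
 * otherwise calls simply fall through to the previously patched variant.
 */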
static void klp_unpatch_func(struct klp_func *func)
{
        struct klp_ops *ops;

        if (WARN_ON(!func->patched))
                return;
        if (WARN_ON(!func->old_func))
                return;

        ops = klp_find_ops(func->old_func);
        if (WARN_ON(!ops))
                return;

        if (list_is_singular(&ops->func_stack)) {
                unsigned long ftrace_loc;

                ftrace_loc =
                        klp_get_ftrace_location((unsigned long)func->old_func);
                if (WARN_ON(!ftrace_loc))
                        return;

                WARN_ON(unregister_ftrace_function(&ops->fops));
                WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

                list_del_rcu(&func->stack_node);
                list_del(&ops->node);
                kfree(ops);
        } else {
                list_del_rcu(&func->stack_node);
        }

        func->patched = false;
}

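/*
 * Redirect @func->old_func to @func->new_func.  The first patch of a
 * function allocates a klp_ops, points its ftrace filter at the function's
 * ftrace location and registers the handler; later patches of the same
 * function are just pushed onto the existing func_stack.
 */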
static int klp_patch_func(struct klp_func *func)
{
        struct klp_ops *ops;
        int ret;

        if (WARN_ON(!func->old_func))
                return -EINVAL;

        if (WARN_ON(func->patched))
                return -EINVAL;

        ops = klp_find_ops(func->old_func);
        if (!ops) {
                unsigned long ftrace_loc;

                ftrace_loc =
                        klp_get_ftrace_location((unsigned long)func->old_func);
                if (!ftrace_loc) {
                        pr_err("failed to find location for function '%s'\n",
                                func->old_name);
                        return -EINVAL;
                }

                ops = kzalloc(sizeof(*ops), GFP_KERNEL);
                if (!ops)
                        return -ENOMEM;

                ops->fops.func = klp_ftrace_handler;
                ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
                                  FTRACE_OPS_FL_DYNAMIC |
                                  FTRACE_OPS_FL_IPMODIFY |
                                  FTRACE_OPS_FL_PERMANENT;

                list_add(&ops->node, &klp_ops);

                INIT_LIST_HEAD(&ops->func_stack);
                list_add_rcu(&func->stack_node, &ops->func_stack);

                ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
                if (ret) {
                        pr_err("failed to set ftrace filter for function '%s' (%d)\n",
                               func->old_name, ret);
                        goto err;
                }

                ret = register_ftrace_function(&ops->fops);
                if (ret) {
                        pr_err("failed to register ftrace handler for function '%s' (%d)\n",
                               func->old_name, ret);
                        ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
                        goto err;
                }


        } else {
                list_add_rcu(&func->stack_node, &ops->func_stack);
        }

        func->patched = true;

        return 0;

err:
        list_del_rcu(&func->stack_node);
        list_del(&ops->node);
        kfree(ops);
        return ret;
}

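/*
 * Unpatch every patched function of @obj, or only its dynamically added
 * NOP functions when @nops_only is set.
 */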
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
        struct klp_func *func;

        klp_for_each_func(obj, func) {
                if (nops_only && !func->nop)
                        continue;

                if (func->patched)
                        klp_unpatch_func(func);
        }

        if (obj->dynamic || !nops_only)
                obj->patched = false;
}


void klp_unpatch_object(struct klp_object *obj)
{
        __klp_unpatch_object(obj, false);
}

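/*
 * Patch every function of @obj.  On failure the object is rolled back so
 * that it is either fully patched or not patched at all.
 */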
int klp_patch_object(struct klp_object *obj)
{
        struct klp_func *func;
        int ret;

        if (WARN_ON(obj->patched))
                return -EINVAL;

        klp_for_each_func(obj, func) {
                ret = klp_patch_func(func);
                if (ret) {
                        klp_unpatch_object(obj);
                        return ret;
                }
        }
        obj->patched = true;

        return 0;
}

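/*
 * Unpatch the patched objects of @patch: klp_unpatch_objects() removes
 * everything, klp_unpatch_objects_dynamic() only the dynamically added
 * NOP functions.
 */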
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
        struct klp_object *obj;

        klp_for_each_object(patch, obj)
                if (obj->patched)
                        __klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
        __klp_unpatch_objects(patch, true);
}
