|
15 | 15 | #define TASK_SIZE_MAX TASK_SIZE_USER64 |
16 | 16 | #endif |
17 | 17 |
|
| 18 | +/* Threshold above which VMX copy path is used */ |
| 19 | +#define VMX_COPY_THRESHOLD 3328 |
| 20 | + |
18 | 21 | #include <asm-generic/access_ok.h> |
19 | 22 |
|
20 | 23 | /* |
@@ -326,40 +329,96 @@ do { \ |
326 | 329 | extern unsigned long __copy_tofrom_user(void __user *to, |
327 | 330 | const void __user *from, unsigned long size); |
328 | 331 |
|
329 | | -#ifdef __powerpc64__ |
330 | | -static inline unsigned long |
331 | | -raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) |
/*
 * Direction of a raw user copy; selects which KUAP access window is
 * opened around the copy and which VMX copy variant is used.
 */
enum usercopy_mode {
	USERCOPY_IN,	/* user -> user (both pointers are user addresses) */
	USERCOPY_FROM,	/* user -> kernel */
	USERCOPY_TO,	/* kernel -> user */
};

/*
 * VMX-accelerated copy entry point used for copies larger than
 * VMX_COPY_THRESHOLD; returns the number of bytes NOT copied.
 * NOTE(review): assumed to manage its own KUAP window and vector
 * register save/restore — confirm against its definition.
 */
unsigned long __copy_tofrom_user_vmx(void __user *to, const void __user *from,
		unsigned long size, enum usercopy_mode mode);

/* Non-VMX (integer-register) copy implementation. */
unsigned long __copy_tofrom_user_base(void __user *to,
		const void __user *from, unsigned long size);

/* POWER7-tuned VMX copy implementation. */
unsigned long __copy_tofrom_user_power7_vmx(void __user *to,
		const void __user *from, unsigned long size);

| 347 | + |
| 348 | +static inline bool will_use_vmx(unsigned long n) |
| 349 | +{ |
| 350 | + return IS_ENABLED(CONFIG_ALTIVEC) && |
| 351 | + cpu_has_feature(CPU_FTR_VMX_COPY) && |
| 352 | + n > VMX_COPY_THRESHOLD; |
| 353 | +} |
| 354 | + |
| 355 | +static inline void raw_copy_allow(void __user *to, enum usercopy_mode mode) |
| 356 | +{ |
| 357 | + switch (mode) { |
| 358 | + case USERCOPY_IN: |
| 359 | + allow_user_access(to, KUAP_READ_WRITE); |
| 360 | + break; |
| 361 | + case USERCOPY_FROM: |
| 362 | + allow_user_access(NULL, KUAP_READ); |
| 363 | + break; |
| 364 | + case USERCOPY_TO: |
| 365 | + allow_user_access(to, KUAP_WRITE); |
| 366 | + break; |
| 367 | + } |
| 368 | +} |
| 369 | + |
| 370 | +static inline void raw_copy_prevent(enum usercopy_mode mode) |
| 371 | +{ |
| 372 | + switch (mode) { |
| 373 | + case USERCOPY_IN: |
| 374 | + prevent_user_access(KUAP_READ_WRITE); |
| 375 | + break; |
| 376 | + case USERCOPY_FROM: |
| 377 | + prevent_user_access(KUAP_READ); |
| 378 | + break; |
| 379 | + case USERCOPY_TO: |
| 380 | + prevent_user_access(KUAP_WRITE); |
| 381 | + break; |
| 382 | + } |
| 383 | +} |
| 384 | + |
| 385 | +static inline unsigned long raw_copy_tofrom_user(void __user *to, |
| 386 | + const void __user *from, unsigned long n, |
| 387 | + enum usercopy_mode mode) |
332 | 388 | { |
333 | 389 | unsigned long ret; |
334 | 390 |
|
335 | | - barrier_nospec(); |
336 | | - allow_user_access(to, KUAP_READ_WRITE); |
| 391 | + if (will_use_vmx(n)) |
| 392 | + return __copy_tofrom_user_vmx(to, from, n, mode); |
| 393 | + |
| 394 | + raw_copy_allow(to, mode); |
337 | 395 | ret = __copy_tofrom_user(to, from, n); |
338 | | - prevent_user_access(KUAP_READ_WRITE); |
| 396 | + raw_copy_prevent(mode); |
339 | 397 | return ret; |
| 398 | + |
| 399 | +} |
| 400 | + |
#ifdef __powerpc64__
/*
 * Copy @n bytes between two user-space buffers.
 * Returns the number of bytes NOT copied (0 on success).
 */
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long left;

	/* Mitigate Spectre-v1 speculation past the access_ok() check. */
	barrier_nospec();
	left = raw_copy_tofrom_user(to, from, n, USERCOPY_IN);
	return left;
}
#endif /* __powerpc64__ */
342 | 409 |
|
343 | 410 | static inline unsigned long raw_copy_from_user(void *to, |
344 | 411 | const void __user *from, unsigned long n) |
345 | 412 | { |
346 | | - unsigned long ret; |
347 | | - |
348 | | - allow_user_access(NULL, KUAP_READ); |
349 | | - ret = __copy_tofrom_user((__force void __user *)to, from, n); |
350 | | - prevent_user_access(KUAP_READ); |
351 | | - return ret; |
| 413 | + return raw_copy_tofrom_user((__force void __user *)to, from, |
| 414 | + n, USERCOPY_FROM); |
352 | 415 | } |
353 | 416 |
|
354 | 417 | static inline unsigned long |
355 | 418 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) |
356 | 419 | { |
357 | | - unsigned long ret; |
358 | | - |
359 | | - allow_user_access(to, KUAP_WRITE); |
360 | | - ret = __copy_tofrom_user(to, (__force const void __user *)from, n); |
361 | | - prevent_user_access(KUAP_WRITE); |
362 | | - return ret; |
| 420 | + return raw_copy_tofrom_user(to, (__force const void __user *)from, |
| 421 | + n, USERCOPY_TO); |
363 | 422 | } |
364 | 423 |
|
365 | 424 | unsigned long __arch_clear_user(void __user *addr, unsigned long size); |
|
0 commit comments