|
40 | 40 | #include <linux/shmem_fs.h>
|
41 | 41 | #include <linux/dtrace_task_impl.h>
|
42 | 42 |
|
| 43 | +#if IS_ENABLED(CONFIG_DT_FASTTRAP) |
| 44 | +# include <linux/uprobes.h> |
| 45 | +#endif /* CONFIG_DT_FASTTRAP || CONFIG_DT_FASTTRAP_MODULE */ |
| 46 | + |
43 | 47 | /*
|
44 | 48 | * OS SPECIFIC DTRACE SETUP
|
45 | 49 | */
|
@@ -419,3 +423,270 @@ void dtrace_disable(void)
|
419 | 423 | dtrace_enabled = 0;
|
420 | 424 | }
|
421 | 425 | EXPORT_SYMBOL(dtrace_disable);
|
| 426 | + |
| 427 | +/* |
| 428 | + * USER SPACE TRACING (FASTTRAP) SUPPORT |
| 429 | + */ |
| 430 | + |
| 431 | +#if IS_ENABLED(CONFIG_DT_FASTTRAP) |
/*
 * Hook installed by the fasttrap provider when it loads; invoked from the
 * uprobe consumer callbacks below each time a userspace tracepoint fires.
 * NULL while no fasttrap handler is registered (see handler() below).
 */
int (*dtrace_tracepoint_hit)(fasttrap_machtp_t *, struct pt_regs *, int);
EXPORT_SYMBOL(dtrace_tracepoint_hit);
| 434 | + |
/*
 * Look up the task for 'pid' and mark it as having an active pid provider.
 *
 * Returns the task_struct on success, or NULL if the task does not exist,
 * has no dtrace task state, or is already dead/zombie.  NOTE(review): the
 * reference taken below is dropped before returning, so the returned
 * pointer is only usable as an opaque token, not for dereferencing —
 * presumably the caller relies on the dt_probes count to keep the provider
 * association alive; confirm against the fasttrap caller.
 */
struct task_struct *register_pid_provider(pid_t pid)
{
	struct task_struct *p;

	/*
	 * Make sure the process exists (FIXME: confirm how a child mid-way
	 * through vfork(2) should be treated here) and isn't a zombie (but
	 * it may still be in fork).
	 */
	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p == NULL) {
		rcu_read_unlock();
		return NULL;
	}

	/* Pin the task so it cannot be freed once we leave the RCU section. */
	get_task_struct(p);
	rcu_read_unlock();

	if (p->state & TASK_DEAD || p->dt_task == NULL ||
	    p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) {
		put_task_struct(p);
		return NULL;
	}

	/*
	 * Increment dtrace_probes so that the process knows to inform us
	 * when it exits or execs.  fasttrap_provider_free() decrements this
	 * when we're done with this provider.  (dt_task is re-checked here
	 * defensively even though it was tested above.)
	 */
	if (p->dt_task != NULL)
		p->dt_task->dt_probes++;
	put_task_struct(p);

	return p;
}
EXPORT_SYMBOL(register_pid_provider);
| 471 | + |
/*
 * Undo register_pid_provider(): drop the dt_probes count on the task for
 * 'pid'.  Silently does nothing if the task no longer exists.
 */
void unregister_pid_provider(pid_t pid)
{
	struct task_struct *p;

	/*
	 * Decrement dtrace_probes on the process whose provider we're
	 * freeing.  We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table.  Don't sweat it if we can't find the process.
	 *
	 * NOTE(review): tasklist_lock is taken in addition to the RCU read
	 * lock; find_task_by_vpid() only requires RCU, so the read_lock is
	 * belt-and-braces here.
	 */
	rcu_read_lock();
	read_lock(&tasklist_lock);
	if ((p = find_task_by_vpid(pid)) == NULL) {
		read_unlock(&tasklist_lock);
		rcu_read_unlock();
		return;
	}

	/* Pin the task before dropping the locks that keep it valid. */
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	if (p->dt_task != NULL)
		p->dt_task->dt_probes--;
	put_task_struct(p);
}
EXPORT_SYMBOL(unregister_pid_provider);
| 500 | + |
| 501 | +int dtrace_copy_code(pid_t pid, uint8_t *buf, uintptr_t addr, size_t size) |
| 502 | +{ |
| 503 | + struct task_struct *p; |
| 504 | + struct inode *ino; |
| 505 | + struct vm_area_struct *vma; |
| 506 | + struct address_space *map; |
| 507 | + loff_t off; |
| 508 | + int rc = 0; |
| 509 | + |
| 510 | + /* |
| 511 | + * First we determine the inode and offset that 'addr' refers to in the |
| 512 | + * task referenced by 'pid'. |
| 513 | + */ |
| 514 | + rcu_read_lock(); |
| 515 | + p = find_task_by_vpid(pid); |
| 516 | + if (!p) { |
| 517 | + rcu_read_unlock(); |
| 518 | + pr_warn("PID %d not found\n", pid); |
| 519 | + return -ESRCH; |
| 520 | + } |
| 521 | + get_task_struct(p); |
| 522 | + rcu_read_unlock(); |
| 523 | + |
| 524 | + down_write(&p->mm->mmap_sem); |
| 525 | + vma = find_vma(p->mm, addr); |
| 526 | + if (vma == NULL || vma->vm_file == NULL) { |
| 527 | + rc = -EFAULT; |
| 528 | + goto out; |
| 529 | + } |
| 530 | + |
| 531 | + ino = vma->vm_file->f_mapping->host; |
| 532 | + map = ino->i_mapping; |
| 533 | + off = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (addr - vma->vm_start); |
| 534 | + |
| 535 | + if (map->a_ops->readpage == NULL && !shmem_mapping(ino->i_mapping)) { |
| 536 | + rc = -EIO; |
| 537 | + goto out; |
| 538 | + } |
| 539 | + |
| 540 | + /* |
| 541 | + * Armed with inode and offset, we can start reading pages... |
| 542 | + */ |
| 543 | + do { |
| 544 | + int len; |
| 545 | + struct page *page; |
| 546 | + void *kaddr; |
| 547 | + |
| 548 | + /* |
| 549 | + * We cannot read beyond the end of the inode content. |
| 550 | + */ |
| 551 | + if (off >= i_size_read(ino)) |
| 552 | + break; |
| 553 | + |
| 554 | + len = min_t(int, size, PAGE_SIZE - (off & ~PAGE_MASK)); |
| 555 | + |
| 556 | + /* |
| 557 | + * Make sure that the page we're tring to read is populated and |
| 558 | + * in page cache. |
| 559 | + */ |
| 560 | + if (map->a_ops->readpage) |
| 561 | + page = read_mapping_page(map, off >> PAGE_SHIFT, |
| 562 | + vma->vm_file); |
| 563 | + else |
| 564 | + page = shmem_read_mapping_page(map, off >> PAGE_SHIFT); |
| 565 | + |
| 566 | + if (IS_ERR(page)) { |
| 567 | + rc = PTR_ERR(page); |
| 568 | + break; |
| 569 | + } |
| 570 | + |
| 571 | + kaddr = kmap_atomic(page); |
| 572 | + memcpy(buf, kaddr + (off & ~PAGE_MASK), len); |
| 573 | + kunmap_atomic(kaddr); |
| 574 | + put_page(page); |
| 575 | + |
| 576 | + buf += len; |
| 577 | + off += len; |
| 578 | + size -= len; |
| 579 | + } while (size > 0); |
| 580 | + |
| 581 | +out: |
| 582 | + up_write(&p->mm->mmap_sem); |
| 583 | + put_task_struct(p); |
| 584 | + |
| 585 | + return rc; |
| 586 | +} |
| 587 | +EXPORT_SYMBOL(dtrace_copy_code); |
| 588 | + |
| 589 | +static int handler(struct uprobe_consumer *self, struct pt_regs *regs, |
| 590 | + int is_ret) |
| 591 | +{ |
| 592 | + fasttrap_machtp_t *mtp = container_of(self, fasttrap_machtp_t, |
| 593 | + fmtp_cns); |
| 594 | + int rc = 0; |
| 595 | + |
| 596 | + read_lock(&this_cpu_core->cpu_ft_lock); |
| 597 | + if (dtrace_tracepoint_hit == NULL) |
| 598 | + pr_warn("Fasttrap probes, but no handler\n"); |
| 599 | + else |
| 600 | + rc = (*dtrace_tracepoint_hit)(mtp, regs, is_ret); |
| 601 | + read_unlock(&this_cpu_core->cpu_ft_lock); |
| 602 | + |
| 603 | + return rc; |
| 604 | +} |
| 605 | + |
/* Uprobe entry-probe callback: dispatch to the common handler. */
static int prb_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	return handler(self, regs, 0);
}
| 610 | + |
/* Uprobe return-probe callback: dispatch to the common handler. */
static int ret_handler(struct uprobe_consumer *self, unsigned long func,
		       struct pt_regs *regs)
{
	return handler(self, regs, 1);
}
| 616 | + |
| 617 | +int dtrace_tracepoint_enable(pid_t pid, uintptr_t addr, int is_ret, |
| 618 | + fasttrap_machtp_t *mtp) |
| 619 | +{ |
| 620 | + struct task_struct *p; |
| 621 | + struct inode *ino; |
| 622 | + struct vm_area_struct *vma; |
| 623 | + loff_t off; |
| 624 | + int rc = 0; |
| 625 | + |
| 626 | + mtp->fmtp_ino = NULL; |
| 627 | + mtp->fmtp_off = 0; |
| 628 | + |
| 629 | + p = find_task_by_vpid(pid); |
| 630 | + if (!p) { |
| 631 | + pr_warn("PID %d not found\n", pid); |
| 632 | + return -ESRCH; |
| 633 | + } |
| 634 | + |
| 635 | + if (p->dt_task == NULL) { |
| 636 | + pr_warn("PID %d no dtrace_task\n", pid); |
| 637 | + return -EFAULT; |
| 638 | + } |
| 639 | + |
| 640 | + vma = find_vma(p->mm, addr); |
| 641 | + if (vma == NULL || vma->vm_file == NULL) |
| 642 | + return -EFAULT; |
| 643 | + |
| 644 | + ino = vma->vm_file->f_mapping->host; |
| 645 | + off = ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (addr - vma->vm_start); |
| 646 | + |
| 647 | + if (is_ret) |
| 648 | + mtp->fmtp_cns.ret_handler = ret_handler; |
| 649 | + else |
| 650 | + mtp->fmtp_cns.handler = prb_handler; |
| 651 | + |
| 652 | + rc = uprobe_register(ino, off, &mtp->fmtp_cns); |
| 653 | + |
| 654 | + /* |
| 655 | + * If successful, increment the count of the number of |
| 656 | + * tracepoints active in the victim process. |
| 657 | + */ |
| 658 | + if (rc == 0) { |
| 659 | + mtp->fmtp_ino = ino; |
| 660 | + mtp->fmtp_off = off; |
| 661 | + |
| 662 | + p->dt_task->dt_tp_count++; |
| 663 | + } |
| 664 | + |
| 665 | + return rc; |
| 666 | +} |
| 667 | +EXPORT_SYMBOL(dtrace_tracepoint_enable); |
| 668 | + |
| 669 | +int dtrace_tracepoint_disable(pid_t pid, fasttrap_machtp_t *mtp) |
| 670 | +{ |
| 671 | + struct task_struct *p; |
| 672 | + |
| 673 | + if (!mtp || !mtp->fmtp_ino) |
| 674 | + return -ENOENT; |
| 675 | + |
| 676 | + uprobe_unregister(mtp->fmtp_ino, mtp->fmtp_off, &mtp->fmtp_cns); |
| 677 | + |
| 678 | + mtp->fmtp_ino = NULL; |
| 679 | + mtp->fmtp_off = 0; |
| 680 | + |
| 681 | + /* |
| 682 | + * Decrement the count of the number of tracepoints active in |
| 683 | + * the victim process (if it still exists). |
| 684 | + */ |
| 685 | + p = find_task_by_vpid(pid); |
| 686 | + if (p != NULL && p->dt_task != NULL) |
| 687 | + p->dt_task->dt_tp_count--; |
| 688 | + |
| 689 | + return 0; |
| 690 | +} |
| 691 | +EXPORT_SYMBOL(dtrace_tracepoint_disable); |
| 692 | +#endif /* CONFIG_DT_FASTTRAP || CONFIG_DT_FASTTRAP_MODULE */ |
0 commit comments