author    Ruslan Bukin <br@FreeBSD.org>    2018-03-19 18:34:08 +0000
committer Ruslan Bukin <br@FreeBSD.org>    2018-03-19 18:34:08 +0000
commit    766f5c51c3151507d3be26d606710d708302d8b2 (patch)
tree      515035e74d1880c27ceb0cfa619f5066bb2a33b3
-rw-r--r--.gitignore7
-rw-r--r--CMakeLists.txt304
-rwxr-xr-xCONTRIBUTING107
-rw-r--r--LICENSE24
-rw-r--r--README79
-rwxr-xr-xdoc/getting_started.md93
-rwxr-xr-xdoc/howto_build.md197
-rw-r--r--doc/howto_capture.md628
-rw-r--r--doc/howto_libipt.md1271
-rwxr-xr-xdoc/howto_pttc.md482
-rw-r--r--doc/man/CMakeLists.txt147
-rw-r--r--doc/man/pt_alloc_encoder.3.md96
-rw-r--r--doc/man/pt_blk_alloc_decoder.3.md98
-rw-r--r--doc/man/pt_blk_get_offset.3.md82
-rw-r--r--doc/man/pt_blk_next.3.md285
-rw-r--r--doc/man/pt_blk_sync_forward.3.md152
-rw-r--r--doc/man/pt_config.3.md359
-rw-r--r--doc/man/pt_enc_get_config.3.md77
-rw-r--r--doc/man/pt_enc_get_offset.3.md77
-rw-r--r--doc/man/pt_image_add_file.3.md135
-rw-r--r--doc/man/pt_image_alloc.3.md99
-rw-r--r--doc/man/pt_image_remove_by_filename.3.md150
-rw-r--r--doc/man/pt_image_set_callback.3.md103
-rw-r--r--doc/man/pt_insn_alloc_decoder.3.md101
-rw-r--r--doc/man/pt_insn_get_image.3.md93
-rw-r--r--doc/man/pt_insn_get_offset.3.md82
-rw-r--r--doc/man/pt_insn_next.3.md264
-rw-r--r--doc/man/pt_insn_sync_forward.3.md153
-rw-r--r--doc/man/pt_iscache_add_file.3.md98
-rw-r--r--doc/man/pt_iscache_alloc.3.md102
-rw-r--r--doc/man/pt_iscache_read.3.md89
-rw-r--r--doc/man/pt_iscache_set_limit.3.md73
-rw-r--r--doc/man/pt_library_version.3.md72
-rw-r--r--doc/man/pt_packet.3.md197
-rw-r--r--doc/man/pt_pkt_alloc_decoder.3.md98
-rw-r--r--doc/man/pt_pkt_get_offset.3.md81
-rw-r--r--doc/man/pt_pkt_sync_forward.3.md115
-rw-r--r--doc/man/pt_qry_alloc_decoder.3.md113
-rw-r--r--doc/man/pt_qry_cond_branch.3.md152
-rw-r--r--doc/man/pt_qry_event.3.md291
-rw-r--r--doc/man/pt_qry_get_offset.3.md83
-rw-r--r--doc/man/pt_qry_sync_forward.3.md152
-rw-r--r--doc/man/pt_qry_time.3.md128
-rw-r--r--include/posix/threads.h259
-rw-r--r--include/pt_compiler.h47
-rw-r--r--include/windows/inttypes.h65
-rw-r--r--include/windows/threads.h239
-rw-r--r--libipt/CMakeLists.txt172
-rwxr-xr-xlibipt/include/intel-pt.h.in2463
-rw-r--r--libipt/internal/include/posix/pt_section_posix.h100
-rw-r--r--libipt/internal/include/pt_asid.h74
-rw-r--r--libipt/internal/include/pt_block_cache.h225
-rw-r--r--libipt/internal/include/pt_block_decoder.h143
-rw-r--r--libipt/internal/include/pt_config.h82
-rw-r--r--libipt/internal/include/pt_cpu.h54
-rw-r--r--libipt/internal/include/pt_cpuid.h40
-rw-r--r--libipt/internal/include/pt_decoder_function.h129
-rw-r--r--libipt/internal/include/pt_encoder.h125
-rw-r--r--libipt/internal/include/pt_event_queue.h143
-rw-r--r--libipt/internal/include/pt_ild.h128
-rw-r--r--libipt/internal/include/pt_image.h140
-rw-r--r--libipt/internal/include/pt_image_section_cache.h206
-rw-r--r--libipt/internal/include/pt_insn.h212
-rw-r--r--libipt/internal/include/pt_insn_decoder.h139
-rw-r--r--libipt/internal/include/pt_last_ip.h79
-rw-r--r--libipt/internal/include/pt_mapped_section.h199
-rw-r--r--libipt/internal/include/pt_msec_cache.h95
-rw-r--r--libipt/internal/include/pt_opcodes.h397
-rw-r--r--libipt/internal/include/pt_packet.h111
-rw-r--r--libipt/internal/include/pt_packet_decoder.h92
-rw-r--r--libipt/internal/include/pt_query_decoder.h134
-rw-r--r--libipt/internal/include/pt_retstack.h87
-rw-r--r--libipt/internal/include/pt_section.h392
-rw-r--r--libipt/internal/include/pt_section_file.h106
-rw-r--r--libipt/internal/include/pt_sync.h71
-rw-r--r--libipt/internal/include/pt_time.h232
-rw-r--r--libipt/internal/include/pt_tnt_cache.h88
-rw-r--r--libipt/internal/include/pti-disp-defs.h39
-rw-r--r--libipt/internal/include/pti-disp.h544
-rw-r--r--libipt/internal/include/pti-imm-defs.h46
-rw-r--r--libipt/internal/include/pti-imm.h544
-rw-r--r--libipt/internal/include/pti-modrm-defs.h38
-rw-r--r--libipt/internal/include/pti-modrm.h544
-rw-r--r--libipt/internal/include/windows/pt_section_windows.h111
-rw-r--r--libipt/src/posix/init.c36
-rw-r--r--libipt/src/posix/pt_cpuid.c37
-rw-r--r--libipt/src/posix/pt_section_posix.c326
-rw-r--r--libipt/src/pt_asid.c106
-rw-r--r--libipt/src/pt_block_cache.c96
-rw-r--r--libipt/src/pt_block_decoder.c3469
-rw-r--r--libipt/src/pt_config.c251
-rw-r--r--libipt/src/pt_cpu.c164
-rw-r--r--libipt/src/pt_decoder_function.c379
-rw-r--r--libipt/src/pt_encoder.c917
-rw-r--r--libipt/src/pt_error.c122
-rw-r--r--libipt/src/pt_event_queue.c203
-rw-r--r--libipt/src/pt_ild.c1223
-rw-r--r--libipt/src/pt_image.c718
-rw-r--r--libipt/src/pt_image_section_cache.c1091
-rw-r--r--libipt/src/pt_insn.c372
-rw-r--r--libipt/src/pt_insn_decoder.c1765
-rw-r--r--libipt/src/pt_last_ip.c127
-rw-r--r--libipt/src/pt_msec_cache.c136
-rw-r--r--libipt/src/pt_packet.c573
-rw-r--r--libipt/src/pt_packet_decoder.c723
-rw-r--r--libipt/src/pt_query_decoder.c3630
-rw-r--r--libipt/src/pt_retstack.c94
-rw-r--r--libipt/src/pt_section.c643
-rw-r--r--libipt/src/pt_section_file.c255
-rw-r--r--libipt/src/pt_sync.c241
-rw-r--r--libipt/src/pt_time.c674
-rw-r--r--libipt/src/pt_tnt_cache.c89
-rw-r--r--libipt/src/pt_version.c43
-rw-r--r--libipt/src/windows/init.c51
-rw-r--r--libipt/src/windows/pt_cpuid.c43
-rw-r--r--libipt/src/windows/pt_section_windows.c397
-rw-r--r--libipt/test/src/ptunit-asid.c425
-rw-r--r--libipt/test/src/ptunit-block_cache.c370
-rw-r--r--libipt/test/src/ptunit-config.c496
-rw-r--r--libipt/test/src/ptunit-cpp.cpp78
-rw-r--r--libipt/test/src/ptunit-cpu.c173
-rw-r--r--libipt/test/src/ptunit-event_queue.c470
-rw-r--r--libipt/test/src/ptunit-fetch.c693
-rw-r--r--libipt/test/src/ptunit-ild.c759
-rw-r--r--libipt/test/src/ptunit-image.c2286
-rw-r--r--libipt/test/src/ptunit-image_section_cache.c2027
-rw-r--r--libipt/test/src/ptunit-last_ip.c374
-rw-r--r--libipt/test/src/ptunit-mapped_section.c198
-rw-r--r--libipt/test/src/ptunit-msec_cache.c419
-rw-r--r--libipt/test/src/ptunit-packet.c859
-rw-r--r--libipt/test/src/ptunit-query.c2873
-rw-r--r--libipt/test/src/ptunit-retstack.c232
-rw-r--r--libipt/test/src/ptunit-section-file.c192
-rw-r--r--libipt/test/src/ptunit-section.c1396
-rw-r--r--libipt/test/src/ptunit-sync.c306
-rw-r--r--libipt/test/src/ptunit-time.c368
-rw-r--r--libipt/test/src/ptunit-tnt_cache.c246
-rw-r--r--pevent/CMakeLists.txt35
-rw-r--r--pevent/include/pevent.h268
-rw-r--r--pevent/src/pevent.c664
-rw-r--r--pevent/test/src/ptunit-pevent.c799
-rw-r--r--ptdump/CMakeLists.txt54
-rw-r--r--ptdump/src/ptdump.c1951
-rw-r--r--pttc/CMakeLists.txt68
-rw-r--r--pttc/include/errcode.h105
-rw-r--r--pttc/include/file.h143
-rw-r--r--pttc/include/parse.h235
-rw-r--r--pttc/include/pttc.h49
-rw-r--r--pttc/include/util.h124
-rw-r--r--pttc/include/yasm.h259
-rw-r--r--pttc/src/errcode.c85
-rw-r--r--pttc/src/file.c314
-rw-r--r--pttc/src/main.c137
-rw-r--r--pttc/src/parse.c2779
-rw-r--r--pttc/src/posix/util.c67
-rw-r--r--pttc/src/pttc.c62
-rw-r--r--pttc/src/util.c240
-rw-r--r--pttc/src/windows/util.c137
-rw-r--r--pttc/src/yasm.c848
-rw-r--r--pttc/test/src/test_all_directives.ptt58
-rw-r--r--pttc/test/src/test_exp_labels.ptt58
-rw-r--r--pttc/test/src/test_label_addr.ptt31
-rw-r--r--ptunit/CMakeLists.txt43
-rw-r--r--ptunit/include/ptunit.h463
-rw-r--r--ptunit/include/ptunit_mkfile.h48
-rw-r--r--ptunit/include/ptunit_threads.h158
-rw-r--r--ptunit/src/posix/ptunit_mkfile.c79
-rw-r--r--ptunit/src/ptunit.c345
-rw-r--r--ptunit/src/windows/ptunit_mkfile.c72
-rw-r--r--ptunit/test/src/ptunit-selftest.c469
-rw-r--r--ptxed/CMakeLists.txt79
-rw-r--r--ptxed/include/load_elf.h64
-rw-r--r--ptxed/src/load_elf.c359
-rw-r--r--ptxed/src/ptxed.c2829
-rwxr-xr-xscript/perf-copy-mapped-files.bash275
-rwxr-xr-xscript/perf-get-opts.bash215
-rwxr-xr-xscript/perf-read-aux.bash124
-rwxr-xr-xscript/perf-read-sideband.bash150
-rwxr-xr-xscript/test.bash275
-rw-r--r--sideband/CMakeLists.txt65
-rw-r--r--sideband/include/libipt-sb.h.in530
-rw-r--r--sideband/internal/include/pt_sb_context.h99
-rw-r--r--sideband/internal/include/pt_sb_decoder.h69
-rw-r--r--sideband/internal/include/pt_sb_file.h47
-rw-r--r--sideband/internal/include/pt_sb_pevent.h155
-rw-r--r--sideband/internal/include/pt_sb_session.h101
-rw-r--r--sideband/src/pt_sb_context.c162
-rw-r--r--sideband/src/pt_sb_file.c99
-rw-r--r--sideband/src/pt_sb_pevent.c1710
-rw-r--r--sideband/src/pt_sb_session.c623
-rw-r--r--test/CMakeLists.txt63
-rw-r--r--test/pevent/CMakeLists.txt40
-rw-r--r--test/pevent/src/pevent-comm_exec-mmap-tsc-iret.ptt99
-rw-r--r--test/pevent/src/pevent-dump.ptt83
-rw-r--r--test/pevent/src/pevent-dump_verbose.ptt218
-rw-r--r--test/pevent/src/pevent-fork.ptt91
-rw-r--r--test/pevent/src/pevent-mmap-tip_cached.ptt93
-rw-r--r--test/pevent/src/pevent-mmap_secondary-tsc.ptt101
-rw-r--r--test/pevent/src/pevent-split.ptt135
-rw-r--r--test/pevent/src/pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.ptt98
-rw-r--r--test/pevent/src/pevent-tip_pgd-mmap-tsc-tip_pge.ptt104
-rw-r--r--test/pevent/src/pevent-tip_pgd-switch-tsc-tip_pge.ptt110
-rw-r--r--test/pevent/src/pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.ptt109
-rw-r--r--test/pevent/src/pevent-warn.ptt79
-rw-r--r--test/src/apl11.ptt80
-rw-r--r--test/src/apl12-psb.ptt87
-rw-r--r--test/src/apl12-tip_pge.ptt89
-rw-r--r--test/src/bad_cpu.ptt60
-rw-r--r--test/src/bdm64-tip-xabort.ptt97
-rw-r--r--test/src/bdm64-tnt-cond-xabort.ptt107
-rw-r--r--test/src/bdm64-tnt-ind_call-xabort.ptt107
-rw-r--r--test/src/bdm70-psb_fup-tip_pge.ptt79
-rw-r--r--test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt97
-rw-r--r--test/src/call_direct-ret_compressed-pic.ptt68
-rw-r--r--test/src/call_direct-ret_compressed.ptt62
-rw-r--r--test/src/call_direct-ret_uncompressed.ptt61
-rw-r--r--test/src/call_indirect-ret_compressed.ptt63
-rw-r--r--test/src/call_indirect-ret_uncompressed.ptt63
-rw-r--r--test/src/call_indirect_deferred-ret_compressed.ptt65
-rw-r--r--test/src/cbr-cyc.ptt55
-rw-r--r--test/src/cbr-mtc-cyc-mtc.ptt54
-rw-r--r--test/src/cbr-tsc-cyc-tma.ptt57
-rw-r--r--test/src/cbr-tsc-tma-mtc-cyc.ptt56
-rw-r--r--test/src/direct_call-tip_pgd_noip-syscall.ptt60
-rw-r--r--test/src/direct_jump-tip_pgd_noip-far_call.ptt61
-rw-r--r--test/src/dump-all-packets.ptt143
-rw-r--r--test/src/exstop_ip-tip_pgd.ptt65
-rw-r--r--test/src/fup-pip-vmcs-tip.ptt71
-rw-r--r--test/src/fup-pip-vmcs-tip_pgd.ptt65
-rw-r--r--test/src/fup-tip-eos.ptt58
-rw-r--r--test/src/fup-tip-fup-tip_pgd.ptt67
-rw-r--r--test/src/fup-tip.ptt70
-rw-r--r--test/src/fup-tip_pgd-stop.ptt60
-rw-r--r--test/src/fup-tip_pgd-tip_pge.ptt63
-rw-r--r--test/src/fup-tip_pgd-tip_pge_other_ip.ptt66
-rw-r--r--test/src/fup-tip_pgd.ptt56
-rw-r--r--test/src/fup-tip_pgd_noip.ptt56
-rw-r--r--test/src/int-iret-cpl_0.ptt63
-rw-r--r--test/src/int-iret-cpl_3.ptt94
-rw-r--r--test/src/int-iret.ptt96
-rw-r--r--test/src/linear-fup-tip_pgd.ptt59
-rw-r--r--test/src/linear-tip.ptt65
-rw-r--r--test/src/loop-tnt-64.ptt193
-rw-r--r--test/src/loop-tnt-tnt.ptt90
-rw-r--r--test/src/loop-tnt.ptt70
-rw-r--r--test/src/mode_exec-tip.ptt67
-rw-r--r--test/src/mtc-cyc_calibrate.ptt56
-rw-r--r--test/src/mtc.ptt50
-rw-r--r--test/src/mwait-pwre-exstop_ip-fup-ovf.ptt64
-rw-r--r--test/src/mwait-pwre-exstop_ip-ovf.ptt57
-rw-r--r--test/src/mwait-pwre-exstop_ip-pwrx.ptt67
-rw-r--r--test/src/ovf-fup.ptt64
-rw-r--r--test/src/ovf-mnt-fup.ptt69
-rw-r--r--test/src/ovf-mnt-tip_pge.ptt73
-rw-r--r--test/src/ovf-pwre-pwrx-tip_pge.ptt67
-rw-r--r--test/src/ovf-timing-fup.ptt77
-rw-r--r--test/src/ovf-timing-tip_pge.ptt81
-rw-r--r--test/src/ovf-tip_pge.ptt68
-rw-r--r--test/src/ovf.ptt50
-rw-r--r--test/src/pip-far_call.ptt68
-rw-r--r--test/src/pip-pip_mov_cr3-fail.ptt61
-rwxr-xr-xtest/src/pip-vmcs-tip_pgd.ptt60
-rw-r--r--test/src/pip_mov_cr3-pip_mov_cr3.ptt66
-rw-r--r--test/src/psb-empty.ptt45
-rw-r--r--test/src/psb-exstop.ptt61
-rw-r--r--test/src/psb-fup-psbend.ptt53
-rw-r--r--test/src/psb-fup-tip_pgd-stop.ptt56
-rw-r--r--test/src/psb-fup-tip_pgd.ptt54
-rw-r--r--test/src/psb-mnt-fup-psbend.ptt55
-rw-r--r--test/src/psb-mnt-psbend.ptt50
-rw-r--r--test/src/psb-ovf-fup.ptt61
-rw-r--r--test/src/psb-ovf-tip_pge.ptt66
-rw-r--r--test/src/psb-pip-psb.ptt55
-rw-r--r--test/src/psb-pip-tip_pge.ptt62
-rw-r--r--test/src/psb-psb.ptt64
-rw-r--r--test/src/psb-stop.ptt48
-rw-r--r--test/src/psb-tnt-psb.ptt69
-rw-r--r--test/src/psb-tsx.ptt57
-rw-r--r--test/src/psb-tsx_abort-tip-fup-tip_pgd.ptt70
-rw-r--r--test/src/psb-tsx_abort-tip_pgd.ptt61
-rw-r--r--test/src/psb-tsx_abort.ptt69
-rw-r--r--test/src/psb-vmcs.ptt46
-rw-r--r--test/src/psb_nofup-psb.ptt61
-rw-r--r--test/src/ptdump-exec-mode.ptt46
-rw-r--r--test/src/ptdump-last-ip.ptt55
-rw-r--r--test/src/ptdump-no-offset-raw.ptt45
-rw-r--r--test/src/ptdump-no-offset.ptt45
-rw-r--r--test/src/ptw-fup.ptt59
-rw-r--r--test/src/ptw.ptt56
-rw-r--r--test/src/ptxed-block-stat.ptt63
-rw-r--r--test/src/ptxed-block-stat_blocks.ptt62
-rw-r--r--test/src/ptxed-end_on_call-fup-tip.ptt73
-rw-r--r--test/src/ptxed-end_on_call-fup-tip_pgd.ptt66
-rw-r--r--test/src/ptxed-end_on_call-ret_tip.ptt82
-rw-r--r--test/src/ptxed-end_on_call-ret_tnt.ptt72
-rw-r--r--test/src/ptxed-end_on_call-tip_pgd.ptt65
-rw-r--r--test/src/ptxed-end_on_jump-fup-tip_pgd.ptt65
-rw-r--r--test/src/ptxed-insn-stat.ptt63
-rw-r--r--test/src/ptxed-stat_insn.ptt63
-rw-r--r--test/src/ptxed-tick.ptt98
-rw-r--r--test/src/pwre-exstop_ip-pwrx.ptt72
-rw-r--r--test/src/ret_near_far.ptt361
-rw-r--r--test/src/skd007.ptt81
-rw-r--r--test/src/skd010-mode_tsx-fup.ptt76
-rw-r--r--test/src/skd010-psb.ptt79
-rw-r--r--test/src/skd010-tip.ptt73
-rw-r--r--test/src/skd010-tip_pgd.ptt84
-rw-r--r--test/src/skd022.ptt81
-rw-r--r--test/src/skl014-call.ptt70
-rw-r--r--test/src/skl014-jmp-jmp.ptt74
-rw-r--r--test/src/skl014-jmp.ptt70
-rw-r--r--test/src/skl014-no_filter.ptt63
-rw-r--r--test/src/syscall-sysret-cpl_0.ptt63
-rw-r--r--test/src/syscall-sysret-cpl_3.ptt71
-rw-r--r--test/src/syscall-sysret.ptt71
-rw-r--r--test/src/sysenter-sysexit-cpl_0.ptt63
-rw-r--r--test/src/sysenter-sysexit-cpl_3.ptt71
-rw-r--r--test/src/sysenter-sysexit.ptt71
-rw-r--r--test/src/tip-eos.ptt55
-rw-r--r--test/src/tip_pgd-direct_call.ptt58
-rw-r--r--test/src/tip_pgd-direct_jump.ptt58
-rw-r--r--test/src/tip_pgd-exstop-tip_pge.ptt70
-rw-r--r--test/src/tip_pgd-indirect_call.ptt58
-rw-r--r--test/src/tip_pgd-indirect_jump.ptt58
-rw-r--r--test/src/tip_pgd-pip-tip_pge.ptt71
-rw-r--r--test/src/tip_pgd-psb-stop.ptt64
-rw-r--r--test/src/tip_pgd-stop.ptt59
-rw-r--r--test/src/tip_pgd-tnt_not_taken.ptt61
-rw-r--r--test/src/tip_pgd-tnt_taken.ptt61
-rw-r--r--test/src/tip_pgd-tsx.ptt78
-rw-r--r--test/src/tip_pgd_noip-far_jump.ptt54
-rw-r--r--test/src/tip_pgd_noip-mov_cr3.ptt54
-rw-r--r--test/src/tip_pge-exstop.ptt63
-rw-r--r--test/src/tip_pge-fup-tip_pgd-tip_pge.ptt66
-rw-r--r--test/src/tip_pge-fup-tip_pgd.ptt56
-rw-r--r--test/src/tip_pge-ptw-fup-tip_pgd.ptt75
-rw-r--r--test/src/tip_pge-ptw-tip_pgd.ptt72
-rw-r--r--test/src/tip_pge-pwre-pwrx-tip_pgd.ptt63
-rw-r--r--test/src/tip_pge-tsx_abort-tip-fup-tip_pgd.ptt73
-rw-r--r--test/src/tip_pge-tsx_abort-tip_pgd.ptt67
-rw-r--r--test/src/tnt-tip_pgd_noip-sysret.ptt64
-rw-r--r--test/src/tnt_n-eos.ptt55
-rw-r--r--test/src/tnt_t-eos.ptt55
-rw-r--r--test/src/truncated.ptt63
-rw-r--r--test/src/tsc-cbr-cyc-tsc.ptt57
-rw-r--r--test/src/tsc-cyc_calibrate.ptt69
-rw-r--r--test/src/tsc-mtc-tma-mtc.ptt52
-rw-r--r--test/src/tsc-tma-cbr-cyc-mtc.ptt57
-rw-r--r--test/src/tsc-tma-cbr-cyc.ptt55
-rw-r--r--test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt58
-rw-r--r--test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt56
-rw-r--r--test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt58
-rw-r--r--test/src/tsc-tma-cbr-mtc-cyc.ptt56
-rw-r--r--test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt60
-rw-r--r--test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt63
-rw-r--r--test/src/tsc-tma-cyc.ptt52
-rw-r--r--test/src/tsc-tma-mtc-cyc_calibrate.ptt60
-rw-r--r--test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt63
-rw-r--r--test/src/tsc-tma-mtc-tsc.ptt54
-rw-r--r--test/src/tsc-tma-mtc_absolute.ptt52
-rw-r--r--test/src/tsc-tma-mtc_infreq.ptt55
-rw-r--r--test/src/tsc-tma-mtc_infreq_wrap.ptt55
-rw-r--r--test/src/tsc-tma-mtc_relative.ptt52
-rw-r--r--test/src/tsc-tma-mtc_wrap.ptt52
-rw-r--r--test/src/tsc-tma_zero_fc-cbr-cyc.ptt56
-rw-r--r--test/src/tsc_tma_mtc_gap.ptt52
-rw-r--r--test/src/tsx-abort.ptt76
-rw-r--r--test/src/tsx-commit.ptt71
-rw-r--r--test/src/tsx-no_spurious_commit.ptt71
-rw-r--r--test/src/vmcs-far_call.ptt68
370 files changed, 82916 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 000000000000..a4fdaf31b7b7
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,7 @@
+*.lst
+*.bin
+*.pt
+*.sb
+*.exp
+*.out
+*.diff
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 000000000000..33c5c1617cc4
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,304 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+cmake_minimum_required(VERSION 2.8.6)
+
+project(PT C)
+
+# versioning
+#
+# the major and the minor number define the supported Intel PT feature set.
+#
+# a build number and a version extension can be optionally specified.
+#
+set(PT_VERSION_MAJOR 1)
+set(PT_VERSION_MINOR 6)
+set(PT_VERSION_BUILD "0" CACHE STRING "")
+set(PT_VERSION_EXT "" CACHE STRING "")
+
+set(PT_VERSION "${PT_VERSION_MAJOR}.${PT_VERSION_MINOR}.${PT_VERSION_BUILD}")
+
+add_definitions(
+ -DPT_VERSION_MAJOR=${PT_VERSION_MAJOR}
+ -DPT_VERSION_MINOR=${PT_VERSION_MINOR}
+ -DPT_VERSION_BUILD=${PT_VERSION_BUILD}
+ -DPT_VERSION_EXT=\"${PT_VERSION_EXT}\"
+)
+
+include(GNUInstallDirs)
+include(FindUnixCommands)
+include(CheckCCompilerFlag)
+
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+set(MAN_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/man)
+
+set(CMAKE_COLOR_MAKEFILE OFF)
+set(CMAKE_VERBOSE_MAKEFILE ON)
+
+set(CMAKE_MACOSX_RPATH ON)
+
+option(FEATURE_THREADS "A small amount of multi-threading support." ON)
+if (FEATURE_THREADS)
+ add_definitions(-DFEATURE_THREADS)
+endif (FEATURE_THREADS)
+
+option(DEVBUILD "Enable compiler warnings and turn them into errors." OFF)
+
+option(PTDUMP "Enable ptdump, a packet dumper")
+option(PTXED "Enable ptxed, an instruction flow dumper")
+option(PTTC "Enable pttc, a test compiler")
+option(PTUNIT "Enable ptunit, a unit test system and libipt unit tests")
+option(MAN "Enable man pages (requires pandoc)." OFF)
+option(SIDEBAND "Enable libipt-sb, a sideband correlation library")
+
+if (SIDEBAND)
+ option(PEVENT "Enable perf_event sideband support." OFF)
+endif (SIDEBAND)
+
+if (PTXED OR PEVENT)
+ option(FEATURE_ELF "Support ELF files." OFF)
+endif (PTXED OR PEVENT)
+
+set(PTT OFF)
+if (BASH AND PTDUMP AND PTXED AND PTTC)
+ set(PTT ON)
+endif ()
+
+if (PTUNIT OR PTT)
+ ENABLE_TESTING()
+endif()
+
+if (PTUNIT)
+ enable_language(CXX)
+endif()
+
+include_directories(
+ include
+ ${CMAKE_CURRENT_BINARY_DIR}/libipt/include
+)
+
+if (PTUNIT)
+ include_directories(
+ ptunit/include
+ )
+endif (PTUNIT)
+
+if (FEATURE_ELF)
+ add_definitions(
+ -DFEATURE_ELF
+ )
+endif (FEATURE_ELF)
+
+if (SIDEBAND)
+ add_definitions(
+ -DFEATURE_SIDEBAND
+ )
+
+ include_directories(
+ ${CMAKE_CURRENT_BINARY_DIR}/sideband/include
+ )
+endif (SIDEBAND)
+
+if (PEVENT)
+ add_definitions(
+ -DFEATURE_PEVENT
+ )
+
+ include_directories(
+ pevent/include
+ )
+endif (PEVENT)
+
+
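+# add the given compiler flag to CMAKE_C_FLAGS if the compiler supports it
+#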
+function(add_cflag_if_available option)
+
+ check_c_compiler_flag(${option} ${option}_supported)
+ if (${option}_supported)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${option}" PARENT_SCOPE)
+ endif (${option}_supported)
+
+endfunction(add_cflag_if_available)
+
+
+if (CMAKE_HOST_WIN32)
+ include_directories(
+ include/windows
+ )
+
+ add_definitions(
+ # cl spells inline __inline in C
+ #
+ /Dinline=__inline
+
+ # cl spells strtoll _strtoi64
+ #
+ /Dstrtoll=_strtoi64
+
+ # cl spells strtoull _strtoui64
+ #
+ /Dstrtoull=_strtoui64
+
+ # avoid annoying warnings about insecure standard functions
+ #
+ /D_CRT_SECURE_NO_WARNINGS
+ )
+
+ # enable parallel build
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /MP")
+
+ if (DEVBUILD)
+ # compiler warnings
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
+
+ # warnings are errors
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /WX")
+ endif (DEVBUILD)
+
+ if (CMAKE_C_COMPILER_ID MATCHES "MSVC")
+ # prevent complaints on:
+ # - do {} while(0) constructs
+ # - int arr[] constructs
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4200")
+
+ endif (CMAKE_C_COMPILER_ID MATCHES "MSVC")
+
+endif (CMAKE_HOST_WIN32)
+
+if (CMAKE_HOST_UNIX)
+ include_directories(
+ include/posix
+ )
+
+ add_definitions(
+ -D_POSIX_C_SOURCE=200809L
+ )
+
+ option(GCOV "Compile for GNU code coverage analysis." OFF)
+
+ if (GCOV)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -ftest-coverage")
+
+ link_libraries(gcov)
+ endif (GCOV)
+
+ if (FEATURE_THREADS)
+ link_libraries(pthread)
+ endif (FEATURE_THREADS)
+
+ # set the language
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c99")
+
+ # windows-like dll export model
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
+
+ if (DEVBUILD)
+ # compiler warnings
+ #
+ if (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Weverything")
+
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-disabled-macro-expansion")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-covered-switch-default")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-sign-conversion")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-switch-enum")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-cast-align")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-padded")
+ else (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wextra")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic")
+
+ add_cflag_if_available("-Wimplicit-fallthrough=5")
+ endif (CMAKE_C_COMPILER_ID MATCHES "[Cc]lang")
+
+ # warnings are errors
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror")
+ endif (DEVBUILD)
+
+endif (CMAKE_HOST_UNIX)
+
+
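+# build a ptunit test executable from the given sources, link it against
+# ptunit, and register it with CTest (only when PTUNIT is enabled)
+#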
+function(add_ptunit_test_base name)
+ if (PTUNIT)
+ add_executable(${name} ${ARGN})
+ target_link_libraries(${name} ptunit)
+
+ add_test(NAME ${name} COMMAND ${name})
+ endif (PTUNIT)
+endfunction(add_ptunit_test_base)
+
+function(add_ptunit_c_test name)
+ add_ptunit_test_base(ptunit-${name} test/src/ptunit-${name}.c ${ARGN})
+endfunction(add_ptunit_c_test)
+
+function(add_ptunit_cpp_test name)
+ add_ptunit_test_base(ptunit-${name} test/src/ptunit-${name}.cpp ${ARGN})
+endfunction(add_ptunit_cpp_test)
+
+function(add_ptunit_libraries name)
+ if (PTUNIT)
+ target_link_libraries(ptunit-${name} ${ARGN})
+ endif (PTUNIT)
+endfunction(add_ptunit_libraries)
+
+
+add_subdirectory(libipt)
+
+if (PTDUMP)
+ add_subdirectory(ptdump)
+endif (PTDUMP)
+if (PTXED)
+ add_subdirectory(ptxed)
+endif (PTXED)
+if (PTTC)
+ add_subdirectory(pttc)
+endif (PTTC)
+if (PTUNIT)
+ add_subdirectory(ptunit)
+endif (PTUNIT)
+if (PTT)
+ add_subdirectory(test)
+endif (PTT)
+if (MAN)
+ add_subdirectory(doc/man)
+endif (MAN)
+if (SIDEBAND)
+ add_subdirectory(sideband)
+endif (SIDEBAND)
+if (PEVENT)
+ add_subdirectory(pevent)
+endif (PEVENT)
diff --git a/CONTRIBUTING b/CONTRIBUTING
new file mode 100755
index 000000000000..51e56117eb2d
--- /dev/null
+++ b/CONTRIBUTING
@@ -0,0 +1,107 @@
+Contributing to this Project
+============================
+
+## License
+
+This project is licensed under the terms and conditions of the 3-Clause BSD
+[LICENSE](LICENSE). By contributing to this project, you agree that you are
+providing your contribution under the terms and conditions of that license.
+
+
+## Patches
+
+We accept patches to this project as pull requests on GitHub. When submitting
+patches, please keep each patch self-contained and as small as possible. Please
+address one topic per patch series. Intermediate patches must build without
+errors (with DEVBUILD=ON) and must not introduce test failures. Please describe
+what
+each patch is doing in its commit message.
+
+If you are contributing a patch series that addresses a GitHub Issue, the last
+patch in the series should have 'fixes #<issue>' in its commit-message.
+
+If the patch series addresses a bug that is not tracked, please provide a
+detailed description of the issue in the commit-message, ideally with a
+description of the 'before' and 'after' behavior.
+
+The patch series should contain regression tests either as PTT tests or as
+ptunit tests. Please make sure that all tests are passing. This may require
+re-ordering patches to introduce the regression test after the issue was fixed.
+
+If the patch series adds a new feature, please make sure to add documentation.
+Prior to submitting this type of contribution, it may be a good idea to first
+discuss the feature as a GitHub issue or via email before implementing it.
+
+This project follows the Linux kernel coding style.
+
+
+## Sign Your Patch
+
+Please use the sign-off line at the end of each patch. Your signature
+certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can
+certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
+
+
+## Reporting Issues
+
+If you want to report an issue or bug, please report them via the GitHub Issues
+tracker.
+
+When reporting a bug, please provide the steps to reproduce it with the ptdump
+and ptxed tools contained in the tree. Please include the command-line that was
+used and the exact error message. You may also attach a trace file and the
+binaries necessary for reproducing the issue or write a small PTT test to
+demonstrate the issue.
+
+When providing trace snippets, please provide a few extra packets of context.
+
+Please also provide the processor family and model on which the trace was
+recorded and the version of the decoder that was used to decode the trace.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 000000000000..9e92f1510bbc
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,24 @@
+Copyright (c) 2013-2018, Intel Corporation
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ * Neither the name of Intel Corporation nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/README b/README
new file mode 100644
index 000000000000..6da6663c7bb1
--- /dev/null
+++ b/README
@@ -0,0 +1,79 @@
+Intel(R) Processor Trace Decoder Library
+========================================
+
+The Intel Processor Trace (Intel PT) Decoder Library is Intel's reference
+implementation for decoding Intel PT. It can be used as a standalone library or
+it can be partially or fully integrated into your tool.
+
+The library comes with a set of sample tools built on top of it and a test
+system built on top of the sample tools. The samples demonstrate how to use the
+library and may serve as a starting point for integrating the library into your
+tool.
+
+Go to https://software.intel.com/en-us/intel-platform-analysis-library for
+support of upcoming (non-public) processors (NDA required).
+
+
+Contents
+--------
+
+ README this file
+
+ libipt A packet encoder/decoder library
+
+
+Optional Contents and Samples
+-----------------------------
+
+ ptdump Example implementation of a packet dumper
+
+ ptxed Example implementation of a trace disassembler
+
+ pttc A trace test generator
+
+ ptunit A simple unit test system
+
+ sideband A sideband correlation library
+
+ pevent A library for reading/writing Linux perf event records
+
+ script A collection of scripts
+
+ test A collection of tests
+
+ include A collection of substitute headers
+
+ doc A document describing the build
+ A document describing how to get started
+ A document describing the usage of the decoder library
+ A document describing how to capture trace
+ A document describing pttc
+
+ doc/man Man pages for the encoder/decoder library
+
+
+Dependencies
+------------
+
+We use cmake for building.
+
+ cmake The cross-platform open-source build system.
+ http://www.cmake.org
+
+
+Other packages you need for some of the above optional components.
+
+ xed The Intel x86 instruction encoder and decoder.
+ https://github.com/intelxed/xed
+
+ This is needed to build and run ptxed.
+
+ yasm The Yasm Modular Assembler
+ http://github.com/yasm
+
+ This is needed to run pttc.
+
+ pandoc A universal document converter
+ http://pandoc.org
+
+ This is needed for man pages.
diff --git a/doc/getting_started.md b/doc/getting_started.md
new file mode 100755
index 000000000000..3b3b6ecfb4df
--- /dev/null
+++ b/doc/getting_started.md
@@ -0,0 +1,93 @@
+Getting Started {#start}
+========================
+
+<!---
+ ! Copyright (c) 2013-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+This chapter gives a brief introduction into the sample tools using one of the
+tests as example. It assumes that you are already familiar with Intel(R)
+Processor Trace (Intel PT) and that you already built the decoder library and
+the sample tools. For detailed information about Intel PT, please refer to
+chapter 11 of the Intel Architecture Instruction Set Extensions Programming
+Reference at http://www.intel.com/products/processor/manuals/.
+
+Start by compiling the loop-tnt test. It consists of a small assembly program
+with interleaved Intel PT directives:
+
+ $ pttc test/src/loop-tnt.ptt
+ loop-tnt-ptxed.exp
+ loop-tnt-ptdump.exp
+
+This produces the following output files:
+
+ loop-tnt.lst a yasm assembly listing file
+ loop-tnt.bin a raw binary file
+ loop-tnt.pt an Intel PT file
+ loop-tnt-ptxed.exp the expected ptxed output
+ loop-tnt-ptdump.exp the expected ptdump output
+
+The latter two files are generated based on the `@pt .exp(<tool>)` directives
+found in the `.ptt` file. They are used for automated testing. See
+script/test.bash for details on that.
+
+
+Use `ptdump` to dump the Intel PT packets:
+
+ $ ptdump loop-tnt.pt
+ 0000000000000000 psb
+ 0000000000000010 fup 3: 0x0000000000100000, ip=0x0000000000100000
+ 0000000000000017 mode.exec cs.d=0, cs.l=1 (64-bit mode)
+ 0000000000000019 psbend
+ 000000000000001b tnt8 !!.
+ 000000000000001c tip.pgd 3: 0x0000000000100013, ip=0x0000000000100013
+
+The ptdump tool takes an Intel PT file as input and dumps the packets in
+human-readable form. The number on the very left is the offset into the Intel
+PT packet stream in hex. This is followed by the packet opcode and payload.
+
+
+Use `ptxed` for reconstructing the execution flow. For this, you need the Intel
+PT file as well as the corresponding binary image. You need to specify the load
+address given by the org directive in the .ptt file when using a raw binary
+file.
+
+ $ ptxed --pt loop-tnt.pt --raw loop-tnt.bin:0x100000
+ 0x0000000000100000 mov rax, 0x0
+ 0x0000000000100007 jmp 0x10000d
+ 0x000000000010000d cmp rax, 0x1
+ 0x0000000000100011 jle 0x100009
+ 0x0000000000100009 add rax, 0x1
+ 0x000000000010000d cmp rax, 0x1
+ 0x0000000000100011 jle 0x100009
+ 0x0000000000100009 add rax, 0x1
+ 0x000000000010000d cmp rax, 0x1
+ 0x0000000000100011 jle 0x100009
+ [disabled]
+
+Ptxed prints disassembled instructions in execution order as well as status
+messages enclosed in brackets.
diff --git a/doc/howto_build.md b/doc/howto_build.md
new file mode 100755
index 000000000000..72417213fc38
--- /dev/null
+++ b/doc/howto_build.md
@@ -0,0 +1,197 @@
+Building the Intel(R) Processor Trace (Intel PT) Decoder Library and Samples {#build}
+============================================================================
+
+<!---
+ ! Copyright (c) 2013-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+This chapter gives step-by-step instructions for building the library and the
+sample tools using cmake. For detailed information on cmake, see
+http://www.cmake.org.
+
+
+## Configuration
+
+Besides the standard cmake options of build type and install directory, you will
+find project-specific options for enabling optional features, optional
+components, or optional build variants.
+
+
+### Optional Components
+
+By default, only the decoder library is built. Other components can be enabled
+by setting the respective cmake variable to ON.
+
+The following optional components are available:
+
+ PTUNIT A simple unit test framework.
+ A collection of unit tests for libipt.
+
+ PTDUMP A packet dumper example.
+
+ PTXED A trace disassembler example.
+
+ PTTC A trace test generator.
+
+ SIDEBAND A sideband correlation library
+
+ PEVENT Support for the Linux perf_event sideband format.
+
+ This feature requires the linux/perf_event.h header.
+
+
+### Optional Features
+
+Features are enabled by setting the respective FEATURE_<name> cmake variable.
+This causes the FEATURE_<name> pre-processor macro to be defined and may also
+cause additional source files to be compiled and additional libraries to be
+linked.
+
+Features are enabled globally and will be used by all components that support
+the feature. The following features are supported:
+
+ FEATURE_ELF Support for the ELF object format.
+
+ This feature requires the elf.h header.
+
+
+ FEATURE_THREADS Support some amount of multi-threading.
+
+ This feature makes image functions thread-safe.
+
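+For example, sources can guard optional code with the corresponding macro (a
+hypothetical usage sketch, not taken from the library):
+
+    #if defined(FEATURE_ELF)
+    /* code that is only compiled when ELF support is enabled */
+    #endif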
+
+### Build Variants
+
+Some build variants depend on libraries or header files that may not be
+available on all supported platforms.
+
+ GCOV Support for code coverage using libgcov.
+
+ This build variant requires libgcov and is not available
+ on Windows.
+
+
+ DEVBUILD Enable compiler warnings and turn them into errors.
+
+
+### Version Settings
+
+The major and minor version numbers are set in the sources and must be changed
+there. You can set the build number and an arbitrary extension string when
+configuring the build.
+
+ PT_VERSION_BUILD The build number.
+
+ Defaults to zero.
+
+
+ PT_VERSION_EXT An arbitrary version extension string.
+
+ Defaults to the empty string.
+
+
+### Dependencies
+
+In order to build ptxed, the location of the XED library and the XED header
+files must be specified.
+
+ XED_INCLUDE Path to the directory containing the XED header files.
+
+ XED_LIBDIR Path to the directory containing the XED library.
+
+
+When using XED from a PIN distribution, the respective directories are located
+in `extras/xed2-<arch>/`.
+
+
+## Building on Linux``*`` and OS X``*``
+
+We recommend out-of-tree builds. Start by creating the destination directory
+and navigating into it:
+
+ $ mkdir -p /path/to/dest
+ $ cd /path/to/dest
+
+
+From here, call cmake with the top-level source directory as argument. You may
+already pass some or all of the cmake variables as arguments to cmake. Without
+arguments, cmake uses default values.
+
+ $ cmake /path/to/src
+
+
+If you have not passed values for XED_INCLUDE or XED_LIBDIR, you need to
+configure them now if you want to build ptxed. You may also use this command to
+change the configuration at any time later on.
+
+ $ make edit_cache
+
+
+After configuring the cmake cache, you can build either specific targets or
+everything using one of:
+
+ $ make <target>
+ $ make
+
+
+Use the help make target to learn about available make targets:
+
+ $ make help
+
+
+
+## Building on Windows``*``
+
+We recommend using the cmake GUI. After starting the cmake GUI, fill in the
+following fields:
+
+ Where is the source code: Path to the top-level source directory.
+
+ Where to build the binaries: Path to the destination directory.
+
+
+We recommend out-of-tree builds, so the build directory should not be the same
+as or below the source directory. After this first configuration step, press
+the
+
+ Configure
+
+button and select the builder you want to use.
+
+Cmake will now populate the remainder of the window with configuration options.
+Please make sure to specify at least XED_INCLUDE and XED_LIBDIR if you want to
+build ptxed. After completing the configuration, press the
+
+ Generate
+
+button. If you selected a Visual Studio generator in the first step, cmake will
+now generate a Visual Studio solution. You can repeat this step if you want to
+change the configuration later on. Beware that you always need to press the
+Generate button after changing the configuration.
+
+In the case of a Visual Studio generator, you may now open the generated Visual
+Studio solution and build the library and samples.
diff --git a/doc/howto_capture.md b/doc/howto_capture.md
new file mode 100644
index 000000000000..979991915eb7
--- /dev/null
+++ b/doc/howto_capture.md
@@ -0,0 +1,628 @@
+Capturing Intel(R) Processor Trace (Intel PT) {#capture}
+=============================================
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+This chapter describes how to capture Intel PT for processing with libipt. For
+illustration, we use the sample tools ptdump and ptxed. We assume that they are
+configured with:
+
+ * PEVENT=ON
+ * FEATURE_ELF=ON
+
+
+## Capturing Intel PT on Linux
+
+Starting with version 4.1, the Linux kernel supports Intel PT via the perf_event
+kernel interface. Starting with version 4.3, the perf user-space tool will
+support Intel PT as well.
+
+
+### Capturing Intel PT via Linux perf_event
+
+We start with setting up a perf_event_attr object for capturing Intel PT. The
+structure is declared in `/usr/include/linux/perf_event.h`.
+
+The Intel PT PMU type is dynamic. Its value can be read from
+`/sys/bus/event_source/devices/intel_pt/type`.
+
+~~~{.c}
+ struct perf_event_attr attr;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.size = sizeof(attr);
+ attr.type = <read type>();
+
+ attr.exclude_kernel = 1;
+ ...
+~~~
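+
+The `<read type>` step above stands for reading the sysfs file mentioned
+earlier. A minimal sketch, assuming that path; the helper name is ours:
+
+~~~{.c}
+#include <stdio.h>
+
+/* Read the dynamic Intel PT PMU type from sysfs.
+ *
+ * Returns the type on success, -1 otherwise.
+ */
+static int read_intel_pt_type(void)
+{
+	FILE *file;
+	int type, errcode;
+
+	file = fopen("/sys/bus/event_source/devices/intel_pt/type", "r");
+	if (!file)
+		return -1;
+
+	errcode = fscanf(file, "%d", &type);
+	fclose(file);
+
+	return (errcode == 1) ? type : -1;
+}
+~~~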
+
+
+Once all desired fields have been set, we can open a perf_event counter for
+Intel PT. See `perf_event_open(2)` for details. In our example, we configure
+it for tracing a single thread.
+
+The system call returns a file descriptor on success, `-1` otherwise.
+
+~~~{.c}
+ int fd;
+
+ fd = syscall(SYS_perf_event_open, &attr, <pid>, -1, -1, 0);
+~~~
+
+
+The Intel PT trace is captured in the AUX area, which has been introduced with
+kernel 4.1. The DATA area contains sideband information such as image changes
+that are necessary for decoding the trace.
+
+In theory, both areas can be configured as circular buffers or as linear buffers
+by mapping them read-only or read-write, respectively. When configured as
+circular buffer, new data will overwrite older data. When configured as linear
+buffer, the user is expected to continuously read out the data and update the
+buffer's tail pointer. New data that do not fit into the buffer will be
+dropped.
+
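+A sketch of the read-out loop for a linearly configured buffer, shown for the
+DATA area (the AUX area works the same way with aux_head and aux_tail).
+process() and the other names are placeholders, not part of any library API:
+
+~~~{.c}
+#include <linux/perf_event.h>
+#include <stdint.h>
+
+/* Read out newly captured data and update the buffer's tail pointer.
+ *
+ * <buf> is the mapped buffer of <size> bytes; process() consumes the data.
+ */
+static void drain_buffer(struct perf_event_mmap_page *header,
+			 const uint8_t *buf, uint64_t size,
+			 void (*process)(uint8_t))
+{
+	uint64_t head, tail;
+
+	head = __atomic_load_n(&header->data_head, __ATOMIC_ACQUIRE);
+
+	for (tail = header->data_tail; tail != head; ++tail)
+		process(buf[tail % size]);
+
+	__atomic_store_n(&header->data_tail, tail, __ATOMIC_RELEASE);
+}
+~~~
+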
+When using the AUX area, its size and offset have to be filled into the
+`perf_event_mmap_page`, which is mapped together with the DATA area. This
+requires the DATA area to be mapped read-write and hence configured as linear
+buffer. In our example, we configure the AUX area as circular buffer.
+
+Note that the size of both the AUX and the DATA area has to be a power-of-two
+number of pages. The DATA area needs one additional page to contain the
+`perf_event_mmap_page`.
+
+~~~{.c}
+ struct perf_event_mmap_page *header;
+ void *base, *data, *aux;
+
+ base = mmap(NULL, (1+2**n) * PAGE_SIZE, PROT_WRITE, MAP_SHARED, fd, 0);
+ if (base == MAP_FAILED)
+ return <handle data mmap error>();
+
+ header = base;
+ data = base + header->data_offset;
+
+ header->aux_offset = header->data_offset + header->data_size;
+ header->aux_size = (2**m) * PAGE_SIZE;
+
+ aux = mmap(NULL, header->aux_size, PROT_READ, MAP_SHARED, fd,
+ header->aux_offset);
+ if (aux == MAP_FAILED)
+ return <handle aux mmap error>();
+~~~
+
+
+### Capturing Intel PT via the perf user-space tool
+
+Starting with kernel 4.3, the perf user-space tool can be used to capture Intel
+PT with the `intel_pt` event. See tools/perf/Documentation in the Linux kernel
+tree for further information. In this text, we describe how to use the captured
+trace with the ptdump and ptxed sample tools.
+
+We start with capturing some Intel PT trace using the `intel_pt` event. Note
+that when collecting system-wide (`-a`) trace, we need context switch events
+(`--switch-events`) to decode the trace. See `perf-record(1)` for details.
+
+~~~{.sh}
+ $ perf record -e intel_pt//[uk] [--per-thread] [-a --switch-events] -T -- ls
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.384 MB perf.data ]
+~~~
+
+
+This generates a file called `perf.data` that contains the Intel PT trace, the
+sideband information, and some metadata. To process the trace with ptxed, we
+extract the Intel PT trace into one file per thread or cpu.
+
+Looking at the raw trace dump of `perf script -D`, we notice
+`PERF_RECORD_AUXTRACE` records. The raw Intel PT trace is contained directly
+after such records. We can extract it with the `dd` command. The arguments to
+`dd` can be computed from the record's fields. This can be done automatically,
+for example with an AWK script.
+
+~~~{.awk}
+ /PERF_RECORD_AUXTRACE / {
+ offset = strtonum($1)
+ hsize = strtonum(substr($2, 2))
+ size = strtonum($5)
+ idx = strtonum($11)
+
+ ofile = sprintf("perf.data-aux-idx%d.bin", idx)
+ begin = offset + hsize
+
+ cmd = sprintf("dd if=perf.data of=%s conv=notrunc oflag=append ibs=1 \
+ skip=%d count=%d status=none", ofile, begin, size)
+
+ system(cmd)
+ }
+~~~
+
+The libipt tree contains such a script in `script/perf-read-aux.bash`.
+
+If we recorded in snapshot mode (perf record -S), we need to extract the Intel
+PT trace into one file per `PERF_RECORD_AUXTRACE` record. This can be done with
+an AWK script similar to the one above. Use `script/perf-read-aux.bash -S` when
+using the script from the libipt tree.
+
+
+In addition to the Intel PT trace, we need sideband information that describes
+process creation and termination, context switches, and memory image changes.
+This sideband information needs to be processed together with the trace. We
+therefore extract the sideband information from `perf.data`. This can again be
+done automatically with an AWK script:
+
+~~~{.awk}
+ function handle_record(ofile, offset, size) {
+ cmd = sprintf("dd if=%s of=%s conv=notrunc oflag=append ibs=1 skip=%d " \
+ "count=%d status=none", file, ofile, offset, size)
+
+ if (dry_run != 0) {
+ print cmd
+ }
+ else {
+ system(cmd)
+ }
+
+ next
+ }
+
+ function handle_global_record(offset, size) {
+ ofile = sprintf("%s-sideband.pevent", file)
+
+ handle_record(ofile, offset, size)
+ }
+
+ function handle_cpu_record(cpu, offset, size) {
+ # (uint32_t) -1 = 4294967295
+ #
+ if (cpu == -1 || cpu == 4294967295) {
+ handle_global_record(offset, size);
+ }
+ else {
+ ofile = sprintf("%s-sideband-cpu%d.pevent", file, cpu)
+
+ handle_record(ofile, offset, size)
+ }
+ }
+
+ /PERF_RECORD_AUXTRACE_INFO/ { next }
+ /PERF_RECORD_AUXTRACE/ { next }
+ /PERF_RECORD_FINISHED_ROUND/ { next }
+
+ /^[0-9]+ [0-9]+ 0x[0-9a-f]+ \[0x[0-9a-f]+\]: PERF_RECORD_/ {
+ cpu = strtonum($1)
+ begin = strtonum($3)
+ size = strtonum(substr($4, 2))
+
+ handle_cpu_record(cpu, begin, size)
+ }
+
+ /^[0-9]+ 0x[0-9a-f]+ \[0x[0-9a-f]+\]: PERF_RECORD_/ {
+ begin = strtonum($2)
+ size = strtonum(substr($3, 2))
+
+ handle_global_record(begin, size)
+ }
+
+ /^0x[0-9a-f]+ \[0x[0-9a-f]+\]: PERF_RECORD_/ {
+ begin = strtonum($1)
+ size = strtonum(substr($2, 2))
+
+ handle_global_record(begin, size)
+ }
+~~~
+
+The libipt tree contains such a script in `script/perf-read-sideband.bash`.
+
+
+In Linux, sideband is implemented as a sequence of perf_event records. Each
+record can optionally be followed by one or more samples that specify the cpu on
+which the record was created or a timestamp that specifies when the record was
+created. We use the timestamp sample to correlate sideband and trace.
+
+To process those samples, we need to know exactly what was sampled so that we
+can find the timestamp sample we are interested in. This information can be
+found in the `sample_type` field of `struct perf_event_attr`. We can extract
+this information from `perf.data` using the `perf evlist` command:
+
+~~~{.sh}
+ $ perf evlist -v
+ intel_pt//u: [...] sample_type: IP|TID|TIME|CPU|IDENTIFIER [...]
+ dummy:u: [...] sample_type: IP|TID|TIME|IDENTIFIER [...]
+~~~
+
+
+The command lists two items, one for the `intel_pt` perf_event counter and one
+for a `dummy` counter that is used for capturing context switch events.
+
+We translate the sample_type string using the `PERF_SAMPLE_*` enumeration
+constants defined in `/usr/include/linux/perf_event.h` into a single 64-bit
+integer constant. For example, `IP|TID|TIME|CPU|IDENTIFIER` translates into
+`0x10086`. Note that the `IP` sample type is reported but will not be attached
+to perf_event records. The resulting constant is then supplied as argument to
+the ptdump and ptxed option:
+
+ * --pevent:sample-type
+
+
+The translation can be done automatically using an AWK script, assuming that we
+already extracted the sample_type string:
+
+~~~{.awk}
+ BEGIN { RS = "[|\n]" }
+ /^TID$/ { config += 0x00002 }
+ /^TIME$/ { config += 0x00004 }
+ /^ID$/ { config += 0x00040 }
+ /^CPU$/ { config += 0x00080 }
+ /^STREAM$/ { config += 0x00200 }
+ /^IDENTIFIER$/ { config += 0x10000 }
+ END {
+ if (config != 0) {
+ printf(" --pevent:sample_type 0x%x", config)
+ }
+ }
+~~~
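+
+The same mapping expressed with the `PERF_SAMPLE_*` constants from
+`linux/perf_event.h` (a sketch; only the constants are part of the kernel API):
+
+~~~{.c}
+#include <linux/perf_event.h>
+#include <stdint.h>
+
+/* IP|TID|TIME|CPU|IDENTIFIER from the example above.
+ *
+ * The IP sample type is reported but not attached to perf_event records,
+ * so it is not included here.  The result is 0x10086.
+ */
+uint64_t sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
+		       PERF_SAMPLE_CPU | PERF_SAMPLE_IDENTIFIER;
+~~~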
+
+
+Sideband and trace are time-correlated. Since Intel PT and perf use different
+time domains, we need a few parameters to translate between the two domains.
+The parameters can be found in `struct perf_event_mmap_page`, which is declared
+in `/usr/include/linux/perf_event.h`:
+
+ * time_shift
+ * time_mult
+ * time_zero
+
+The header also documents how to calculate TSC from perf_event timestamps.
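+
+A sketch of that calculation, translating a perf_event timestamp back into the
+TSC domain; it follows the header's documentation and assumes that the
+cap_user_time_zero capability is set:
+
+~~~{.c}
+#include <stdint.h>
+
+static uint64_t perf_time_to_tsc(uint64_t time, uint16_t time_shift,
+				 uint32_t time_mult, uint64_t time_zero)
+{
+	uint64_t quot, rem;
+
+	quot = (time - time_zero) / time_mult;
+	rem = (time - time_zero) % time_mult;
+
+	return (quot << time_shift) + (rem << time_shift) / time_mult;
+}
+~~~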
+
+The ptdump and ptxed sample tools do this translation but we need to supply the
+parameters via corresponding options:
+
+ * --pevent:time-shift
+ * --pevent:time-mult
+ * --pevent:time-zero
+
+We can extract this information from the PERF_RECORD_AUXTRACE_INFO record. This
+is an artificial record that the perf tool synthesizes when capturing the trace.
+We can view it using the `perf script` command:
+
+~~~{.sh}
+ $ perf script --no-itrace -D | grep -A14 PERF_RECORD_AUXTRACE_INFO
+ 0x1a8 [0x88]: PERF_RECORD_AUXTRACE_INFO type: 1
+ PMU Type 6
+ Time Shift 10
+ Time Muliplier 642
+ Time Zero 18446744056970350213
+ Cap Time Zero 1
+ TSC bit 0x400
+ NoRETComp bit 0x800
+ Have sched_switch 0
+ Snapshot mode 0
+ Per-cpu maps 1
+ MTC bit 0x200
+ TSC:CTC numerator 0
+ TSC:CTC denominator 0
+ CYC bit 0x2
+~~~
+
+
+This will also give us the values for `cpuid[0x15].eax` and `cpuid[0x15].ebx`
+that we need for tracking time with `MTC` and `CYC` packets in `TSC:CTC
+denominator` and `TSC:CTC numerator` respectively. On processors that do not
+support `MTC` and `CYC`, the values are reported as zero.
+
+When decoding system-wide trace, we need to correlate context switch sideband
+events with decoded instructions from the trace to find a suitable location for
+switching the traced memory image for the scheduled-in process. The heuristics
+we use rely on sufficiently precise timing information. If timing information
+is too coarse, we might map the context switch to the wrong location.
+
+When tracing ring-0, we use any code in kernel space. Since the kernel is
+mapped into every process, this is good enough as long as we are not interested
+in identifying processes and threads in the trace. To allow ptxed to
+distinguish kernel from user addresses, we provide the start address of the
+kernel via the option:
+
+ * --pevent:kernel-start
+
+
+We can find the address in `kallsyms` and we can extract it automatically using
+an AWK script:
+
+~~~{.awk}
+ function update_kernel_start(vaddr) {
+ if (vaddr < kernel_start) {
+ kernel_start = vaddr
+ }
+ }
+
+ BEGIN { kernel_start = 0xffffffffffffffff }
+ /^[0-9a-f]+ T _text$/ { update_kernel_start(strtonum("0x" $1)) }
+ /^[0-9a-f]+ T _stext$/ { update_kernel_start(strtonum("0x" $1)) }
+ END {
+ if (kernel_start < 0xffffffffffffffff) {
+ printf(" --pevent:kernel-start 0x%x", kernel_start)
+ }
+ }
+~~~
+
+
+When not tracing ring-0, we use a region where tracing has been disabled
+assuming that tracing is disabled due to a ring transition.
+
+
+To apply processor errata we need to know on which processor the trace was
+collected and provide this information to ptxed using the
+
+ * --cpu
+
+option. We can find this information in the `perf.data` header using the `perf
+script --header-only` command:
+
+~~~{.sh}
+ $ perf script --header-only | grep cpuid
+ # cpuid : GenuineIntel,6,61,4
+~~~
+
+
+The libipt tree contains a script in `script/perf-get-opts.bash` that computes
+all the perf_event related options from `perf.data` and from previously
+extracted sideband information.
+
+
+The kernel uses special filenames in `PERF_RECORD_MMAP` and `PERF_RECORD_MMAP2`
+records to indicate pseudo-files that cannot be found directly on disk. One
+such special filename is
+
+ * [vdso]
+
+which corresponds to the virtual dynamic shared object that is mapped into every
+process. See `vdso(7)` for details. Depending on the installation there may be
+different vdso flavors. We need to specify the location of each flavor that is
+referenced in the trace via corresponding options:
+
+ * --pevent:vdso-x64
+ * --pevent:vdso-x32
+ * --pevent:vdso-ia32
+
+The perf tool installation may provide utilities called:
+
+ * perf-read-vdso32
+ * perf-read-vdsox32
+
+for reading the ia32 and the x32 vdso flavors. If the native flavor is not
+specified or the specified file does not exist, ptxed will copy its own vdso
+into a temporary file and use that. This may not work for remote decode, nor
+can ptxed provide other vdso flavors.
+
+
+Let's put it all together. Note that we use the `-m` option of
+`script/perf-get-opts.bash` to specify the master sideband file for the cpu on
+which we want to decode the trace. We further enable tick events for finer
+grain sideband correlation.
+
+~~~{.sh}
+ $ perf record -e intel_pt//u -T --switch-events -- grep -r foo /usr/include
+ [ perf record: Woken up 18 times to write data ]
+ [ perf record: Captured and wrote 30.240 MB perf.data ]
+ $ script/perf-read-aux.bash
+ $ script/perf-read-sideband.bash
+ $ ptdump $(script/perf-get-opts.bash) perf.data-aux-idx0.bin
+ [...]
+ $ ptxed $(script/perf-get-opts.bash -m perf.data-sideband-cpu0.pevent)
+ --pevent:vdso... --event:tick --pt perf.data-aux-idx0.bin
+ [...]
+~~~
+
+
+When tracing ring-0 code, we need to use `perf-with-kcore` for recording and
+supply the `perf.data` directory as additional argument after the `record` perf
+sub-command. When `perf-with-kcore` completes, the `perf.data` directory
+contains `perf.data` as well as a directory `kcore_dir` that contains copies of
+`/proc/kcore` and `/proc/kallsyms`. We need to supply the path to `kcore_dir`
+to `script/perf-get-opts.bash` using the `-k` option.
+
+~~~{.sh}
+ $ perf-with-kcore record dir -e intel_pt// -T -a --switch-events -- sleep 10
+ [ perf record: Woken up 26 times to write data ]
+ [ perf record: Captured and wrote 54.238 MB perf.data ]
+ Copying kcore
+ Done
+ $ cd dir
+ $ script/perf-read-aux.bash
+ $ script/perf-read-sideband.bash
+ $ ptdump $(script/perf-get-opts.bash) perf.data-aux-idx0.bin
+ [...]
+ $ ptxed $(script/perf-get-opts.bash -k kcore_dir
+ -m perf.data-sideband-cpu0.pevent)
+ --pevent:vdso... --event:tick --pt perf.data-aux-idx0.bin
+ [...]
+~~~
+
+
+#### Remote decode
+
+To decode the recorded trace on a different system, we copy all the files
+referenced in the trace to the system on which the trace is being decoded and
+point ptxed to the respective root directory using the option:
+
+ * --pevent:sysroot
+
+
+Ptxed will prepend the sysroot directory to every filename referenced in
+`PERF_RECORD_MMAP` and `PERF_RECORD_MMAP2` records.
+
+Note that like most configuration options, the `--pevent:sysroot` option needs
+to precede the `--pevent:primary` and `--pevent:secondary` options.
+
+
+We can extract the referenced file names from `PERF_RECORD_MMAP` and
+`PERF_RECORD_MMAP2` records in the output of `perf script -D` and we can
+automatically copy the files using an AWK script:
+
+~~~{.awk}
+ function dirname(file) {
+     items = split(file, parts, "/")
+
+     dname = parts[1]
+     for (part = 2; part < items; part++) {
+         dname = dname "/" parts[part]
+     }
+
+     return dname
+ }
+
+ function handle_mmap(file) {
+ # ignore any non-absolute filename
+ #
+ # this covers pseudo-files like [kallsyms] or [vdso]
+ #
+ if (substr(file, 1, 1) != "/") {
+ return
+ }
+
+ # ignore kernel modules
+ #
+ # we rely on kcore
+ #
+ if (match(file, /\.ko$/) != 0) {
+ return
+ }
+
+ # ignore //anon
+ #
+ if (file == "//anon") {
+ return
+ }
+
+ dst = outdir file
+ dir = dirname(dst)
+
+ system("mkdir -p " dir)
+ system("cp " file " " dst)
+ }
+
+ /PERF_RECORD_MMAP/ { handle_mmap($NF) }
+~~~
+
+The libipt tree contains such a script in `script/perf-copy-mapped-files.bash`.
+It will also read the vdso flavors for which the perf installation provides
+readers.
+
+We use the `-s` option of `script/perf-get-opts.bash` to have it generate
+options for the sysroot directory and for the vdso flavors found in that
+sysroot.
+
+For the remote decode case, we thus get (assuming kernel and user tracing on a
+64-bit system):
+
+~~~{.sh}
+ [record]
+ $ perf-with-kcore record dir -e intel_pt// -T -a --switch-events -- sleep 10
+ [ perf record: Woken up 26 times to write data ]
+ [ perf record: Captured and wrote 54.238 MB perf.data ]
+ Copying kcore
+ Done
+ $ cd dir
+ $ script/perf-copy-mapped-files.bash -o sysroot
+
+ [copy dir to remote system]
+
+ [decode]
+ $ script/perf-read-aux.bash
+ $ script/perf-read-sideband.bash
+ $ ptdump $(script/perf-get-opts.bash -s sysroot) perf.data-aux-idx0.bin
+ [...]
+ $ ptxed $(script/perf-get-opts.bash -s sysroot -k kcore_dir
+ -m perf.data-sideband-cpu0.pevent)
+ --event:tick --pt perf.data-aux-idx0.bin
+ [...]
+~~~
+
+
+#### Troubleshooting
+
+##### Sideband correlation and `no memory mapped at this address` errors
+
+If timing information in the trace is too coarse, we may end up applying
+sideband events too late. This typically results in `no memory mapped at this
+address` errors.
+
+Try to increase timing precision by increasing the MTC frequency or by enabling
+cycle-accurate tracing. If this does not help or is not an option, ptxed can
+process sideband events earlier than timing information indicates. Supply a
+suitable value to ptxed's option:
+
+ * --pevent:tsc-offset
+
+
+This option adds its argument to the timing information in the trace and so
+causes sideband events to be processed earlier. There is logic in ptxed to
+determine a suitable location in the trace for applying some sideband events.
+For example, a context switch event is postponed until tracing is disabled or
+enters the kernel.
+
+Those heuristics have their limits, of course. If the tsc offset is chosen too
+large, ptxed may end up mapping a sideband event to the wrong kernel entry.
+
+
+##### Sideband and trace losses leading to decode errors
+
+The perf tool reads trace and sideband while it is being collected and stores it
+in `perf.data`. If it fails to keep up, perf_event records or trace may be
+lost. The losses are indicated in the sideband:
+
+ * `PERF_RECORD_LOST` indicates sideband losses
+ * `PERF_RECORD_AUX.TRUNCATED` indicates trace losses
+
+
+Sideband losses may go unnoticed or may lead to decode errors. Typical errors
+are:
+
+ * `no memory mapped at this address`
+ * `decoder out of sync`
+ * `trace stream does not match query`
+
+
+Ptxed diagnoses sideband losses as a warning both to stderr and to stdout,
+interleaved with the normal output.
+
+Trace losses may go unnoticed or may lead to all kinds of errors. Ptxed
+diagnoses trace losses as a warning to stderr.
+
+
+### Capturing Intel PT via Simple-PT
+
+The Simple-PT project on github supports capturing Intel PT on Linux with an
+alternative kernel driver. The spt decoder supports sideband information.
+
+See the project's page at https://github.com/andikleen/simple-pt for more
+information including examples.
diff --git a/doc/howto_libipt.md b/doc/howto_libipt.md
new file mode 100644
index 000000000000..1b3f2ab1555d
--- /dev/null
+++ b/doc/howto_libipt.md
@@ -0,0 +1,1271 @@
+Decoding Intel(R) Processor Trace Using libipt {#libipt}
+========================================================
+
+<!---
+ ! Copyright (c) 2013-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+This chapter describes how to use libipt for various tasks around Intel
+Processor Trace (Intel PT). For code examples, refer to the sample tools that
+are contained in the source tree:
+
+ * *ptdump* A packet dumper example.
+ * *ptxed* A control-flow reconstruction example.
+ * *pttc* A packet encoder example.
+
+
+For detailed information about Intel PT, please refer to the respective chapter
+in Volume 3 of the Intel Software Developer's Manual at
+http://www.intel.com/sdm.
+
+
+## Introduction
+
+The libipt decoder library provides multiple layers of abstraction ranging from
+packet encoding and decoding to full execution flow reconstruction. The layers
+are organized as follows:
+
+ * *packets* This layer deals with raw Intel PT packets.
+
+ * *events* This layer deals with packet combinations that
+ encode higher-level events.
+
+ * *instruction flow* This layer deals with the execution flow on the
+ instruction level.
+
+ * *block* This layer also deals with the execution flow on the
+ instruction level but operates on blocks of
+ sequential instructions.
+
+ It is faster than the instruction flow decoder but
+ requires a small amount of post-processing.
+
+
+Each layer provides its own encoder or decoder struct plus a set of functions
+for allocating and freeing encoder or decoder objects and for synchronizing
+decoders onto the Intel PT packet stream. Function names are prefixed with
+`pt_<lyr>_` where `<lyr>` is an abbreviation of the layer name. The following
+abbreviations are used:
+
+ * *enc* Packet encoding (packet layer).
+ * *pkt* Packet decoding (packet layer).
+ * *qry* Event (or query) layer.
+ * *insn* Instruction flow layer.
+ * *blk* Block layer.
+
+
+Here is some generic example code for working with decoders:
+
+~~~{.c}
+ struct pt_<layer>_decoder *decoder;
+ struct pt_config config;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.size = sizeof(config);
+ config.begin = <pt buffer begin>;
+ config.end = <pt buffer end>;
+ config.cpu = <cpu identifier>;
+ config...
+
+ decoder = pt_<lyr>_alloc_decoder(&config);
+ if (!decoder)
+ <handle error>(errcode);
+
+ errcode = pt_<lyr>_sync_<where>(decoder);
+ if (errcode < 0)
+ <handle error>(errcode);
+
+ <use decoder>(decoder);
+
+ pt_<lyr>_free_decoder(decoder);
+~~~
+
+First, configure the decoder. As a minimum, the size of the config struct and
+the `begin` and `end` of the buffer containing the Intel PT data need to be set.
+Configuration options details will be discussed later in this chapter. In the
+case of packet encoding, this is the begin and end address of the pre-allocated
+buffer, into which Intel PT packets shall be written.
+
+Next, allocate a decoder object for the layer you are interested in. A return
+value of NULL indicates an error. There is no further information available on
+the exact error condition. Most of the time, however, the error is the result
+of an incomplete or inconsistent configuration.
+
+Before the decoder can be used, it needs to be synchronized onto the Intel PT
+packet stream specified in the configuration. The only exception to this is the
+packet encoder, which is implicitly synchronized onto the beginning of the Intel
+PT buffer.
+
+Depending on the type of decoder, one or more synchronization options are
+available.
+
+ * `pt_<lyr>_sync_forward()` Synchronize onto the next PSB in forward
+ direction (or the first PSB if not yet
+ synchronized).
+
+ * `pt_<lyr>_sync_backward()` Synchronize onto the next PSB in backward
+ direction (or the last PSB if not yet
+ synchronized).
+
+ * `pt_<lyr>_sync_set()` Set the synchronization position to a
+ user-defined location in the Intel PT packet
+ stream.
+ There is no check whether the specified
+ location makes sense or is valid.
+
+
+After synchronizing, the decoder can be used. While decoding, the decoder
+stores the location of the last PSB it encountered during normal decode.
+Subsequent calls to `pt_<lyr>_sync_forward()` will start searching from that
+location. This is useful for re-synchronizing onto the Intel PT packet stream
+in case of errors. An example of a typical decode loop is given below:
+
+~~~{.c}
+ for (;;) {
+ int errcode;
+
+ errcode = <use decoder>(decoder);
+ if (errcode >= 0)
+ continue;
+
+ if (errcode == -pte_eos)
+ return;
+
+ <report error>(errcode);
+
+ do {
+ errcode = pt_<lyr>_sync_forward(decoder);
+
+ if (errcode == -pte_eos)
+ return;
+ } while (errcode < 0);
+ }
+~~~
+
+You can get the current decoder position as offset into the Intel PT buffer via:
+
+ pt_<lyr>_get_offset()
+
+
+You can get the position of the last synchronization point as offset into the
+Intel PT buffer via:
+
+ pt_<lyr>_get_sync_offset()
+
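+Both functions follow the same pattern: they take the decoder and a `uint64_t`
+pointer that receives the offset, as in the following sketch:
+
+~~~{.c}
+ uint64_t offset;
+ int errcode;
+
+ errcode = pt_<lyr>_get_offset(decoder, &offset);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~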
+
+Each layer will be discussed in detail below. In the remainder of this section,
+general functionality will be considered.
+
+
+### Version
+
+You can query the library version using:
+
+ * `pt_library_version()`
+
+
+This function returns a version structure that can be used for compatibility
+checks or simply for reporting the version of the decoder library.
+
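+A minimal sketch for reporting the version; the `struct pt_version` field names
+used below (`major`, `minor`, `build`) are as declared in the intel-pt.h header:
+
+~~~{.c}
+ struct pt_version version;
+
+ version = pt_library_version();
+ printf("libipt %u.%u.%u\n", version.major, version.minor, version.build);
+~~~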
+
+### Errors
+
+The library uses a single error enum for all layers.
+
+ * `enum pt_error_code` An enumeration of encode and decode errors.
+
+
+Errors are typically represented as negative pt_error_code enumeration constants
+and returned as an int. The library provides two functions for dealing with
+errors:
+
+ * `pt_errcode()` Translate an int return value into a pt_error_code
+ enumeration constant.
+
+ * `pt_errstr()` Returns a human-readable error string.
+
+
+Not all errors may occur on every layer. Every API function specifies the
+errors it may return.
+
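+A typical pattern is to translate a negative return value into a human-readable
+message, for example:
+
+~~~{.c}
+ int status;
+
+ status = pt_insn_sync_forward(decoder);
+ if (status < 0)
+     fprintf(stderr, "sync error: %s\n", pt_errstr(pt_errcode(status)));
+~~~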
+
+### Configuration
+
+Every encoder or decoder allocation function requires a configuration argument.
+Some of its fields have already been discussed in the example above. Refer to
+the `intel-pt.h` header for detailed and up-to-date documentation of each field.
+
+As a minimum, the `size` field needs to be set to `sizeof(struct pt_config)` and
+`begin` and `end` need to be set to the Intel PT buffer to use.
+
+The size is used for detecting library version mismatches and to provide
+backwards compatibility. Without the proper `size`, decoder allocation will
+fail.
+
+Although not strictly required, it is recommended to also set the `cpu` field to
+the processor on which Intel PT has been collected (for decoders), or for which
+Intel PT shall be generated (for encoders). This allows implementing
+processor-specific behavior such as erratum workarounds.
+
+
+## The Packet Layer
+
+This layer deals with Intel PT packet encoding and decoding. It can further be
+split into three sub-layers: opcodes, encoding, and decoding.
+
+
+### Opcodes
+
+The opcodes layer provides enumerations for all the bits necessary for Intel PT
+encoding and decoding. The enumeration constants can be used without linking to
+the decoder library. There is no encoder or decoder struct associated with this
+layer. See the intel-pt.h header file for details.
+
+
+### Packet Encoding
+
+The packet encoding layer provides support for encoding Intel PT
+packet-by-packet. Start by configuring and allocating a `pt_packet_encoder` as
+shown below:
+
+~~~{.c}
+ struct pt_encoder *encoder;
+ struct pt_config config;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.size = sizeof(config);
+ config.begin = <pt buffer begin>;
+ config.end = <pt buffer end>;
+ config.cpu = <cpu identifier>;
+
+ encoder = pt_alloc_encoder(&config);
+ if (!encoder)
+ <handle error>(errcode);
+~~~
+
+For packet encoding, only the mandatory config fields need to be filled in.
+
+The allocated encoder object will be implicitly synchronized onto the beginning
+of the Intel PT buffer. You may change the encoder's position at any time by
+calling `pt_enc_sync_set()` with the desired buffer offset.
+
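+For example, to reposition the encoder at some offset into the buffer (the
+offset value below is only illustrative):
+
+~~~{.c}
+ errcode = pt_enc_sync_set(encoder, 0x1000ull);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~
+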
+Next, fill in a `pt_packet` object with details about the packet to be encoded.
+You do not need to fill in the `size` field. The needed size is computed by the
+encoder. There is no consistency check with the size specified in the packet
+object. The following example encodes a TIP packet:
+
+~~~{.c}
+ struct pt_packet_encoder *encoder = ...;
+ struct pt_packet packet;
+ int errcode;
+
+ packet.type = ppt_tip;
+ packet.payload.ip.ipc = pt_ipc_update_16;
+ packet.payload.ip.ip = <ip>;
+~~~
+
+For IP packets, for example FUP or TIP.PGE, there is no need to mask out bits in
+the `ip` field that will not be encoded in the packet due to the specified IP
+compression in the `ipc` field. The encoder will ignore them.
+
+There are no consistency checks whether the specified IP compression in the
+`ipc` field is allowed in the current context or whether decode will result in
+the full IP specified in the `ip` field.
+
+Once the packet object has been filled, it can be handed over to the encoder as
+shown here:
+
+~~~{.c}
+ errcode = pt_enc_next(encoder, &packet);
+ if (errcode < 0)
+ <handle error>(errcode);
+~~~
+
+The encoder will encode the packet, write it into the Intel PT buffer, and
+advance its position to the next byte after the packet. On a successful encode,
+it will return the number of bytes that have been written. In case of errors,
+nothing will be written and the encoder returns a negative error code.
+
+
+### Packet Decoding
+
+The packet decoding layer provides support for decoding Intel PT
+packet-by-packet. Start by configuring and allocating a `pt_packet_decoder` as
+shown here:
+
+~~~{.c}
+ struct pt_packet_decoder *decoder;
+ struct pt_config config;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.size = sizeof(config);
+ config.begin = <pt buffer begin>;
+ config.end = <pt buffer end>;
+ config.cpu = <cpu identifier>;
+ config.decode.callback = <decode function>;
+ config.decode.context = <decode context>;
+
+ decoder = pt_pkt_alloc_decoder(&config);
+ if (!decoder)
+ <handle error>(errcode);
+~~~
+
+For packet decoding, an optional decode callback function may be specified in
+addition to the mandatory config fields. If specified, the callback function
+will be called for packets the decoder does not know about. If there is no
+decode callback specified, the decoder will return `-pte_bad_opc`. In addition
+to the callback function pointer, an optional pointer to user-defined context
+information can be specified. This context will be passed to the decode
+callback function.
+
+Before the decoder can be used, it needs to be synchronized onto the Intel PT
+packet stream. Packet decoders offer three synchronization functions. To
+iterate over synchronization points in the Intel PT packet stream in forward or
+backward direction, use one of the following two functions respectively:
+
+ pt_pkt_sync_forward()
+ pt_pkt_sync_backward()
+
+
+To manually synchronize the decoder at a particular offset into the Intel PT
+packet stream, use the following function:
+
+ pt_pkt_sync_set()
+
+
+There are no checks to ensure that the specified offset is at the beginning of a
+packet. The example below shows synchronization to the first synchronization
+point:
+
+~~~{.c}
+ struct pt_packet_decoder *decoder;
+ int errcode;
+
+ errcode = pt_pkt_sync_forward(decoder);
+ if (errcode < 0)
+ <handle error>(errcode);
+~~~
+
+The decoder will remember the last synchronization packet it decoded.
+Subsequent calls to `pt_pkt_sync_forward` and `pt_pkt_sync_backward` will use
+this as their starting point.
+
+You can get the current decoder position as offset into the Intel PT buffer via:
+
+ pt_pkt_get_offset()
+
+
+You can get the position of the last synchronization point as offset into the
+Intel PT buffer via:
+
+ pt_pkt_get_sync_offset()
+
+
+Once the decoder is synchronized, you can iterate over packets by repeated calls
+to `pt_pkt_next()` as shown in the following example:
+
+~~~{.c}
+ struct pt_packet_decoder *decoder;
+ int errcode;
+
+ for (;;) {
+ struct pt_packet packet;
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0)
+ break;
+
+ <process packet>(&packet);
+ }
+~~~
+
+
+## The Event Layer
+
+The event layer deals with packet combinations that encode higher-level events.
+It is used for reconstructing execution flow for users who need finer-grain
+control not available via the instruction flow layer or for users who want to
+integrate execution flow reconstruction with other functionality more tightly
+than it would be possible otherwise.
+
+This section describes how to use the query decoder for reconstructing execution
+flow. See the instruction flow decoder as an example. Start by configuring and
+allocating a `pt_query_decoder` as shown below:
+
+~~~{.c}
+ struct pt_query_decoder *decoder;
+ struct pt_config config;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.size = sizeof(config);
+ config.begin = <pt buffer begin>;
+ config.end = <pt buffer end>;
+ config.cpu = <cpu identifier>;
+ config.decode.callback = <decode function>;
+ config.decode.context = <decode context>;
+
+ decoder = pt_qry_alloc_decoder(&config);
+ if (!decoder)
+ <handle error>(errcode);
+~~~
+
+An optional packet decode callback function may be specified in addition to the
+mandatory config fields. If specified, the callback function will be called for
+packets the decoder does not know about. The query decoder will ignore the
+unknown packet except for its size in order to skip it. If there is no decode
+callback specified, the decoder will abort with `-pte_bad_opc`. In addition to
+the callback function pointer, an optional pointer to user-defined context
+information can be specified. This context will be passed to the decode
+callback function.
+
+Before the decoder can be used, it needs to be synchronized onto the Intel PT
+packet stream. To iterate over synchronization points in the Intel PT packet
+stream in forward or backward direction, the query decoders offer the following
+two synchronization functions respectively:
+
+ pt_qry_sync_forward()
+ pt_qry_sync_backward()
+
+
+To manually synchronize the decoder at a synchronization point (i.e. PSB packet)
+in the Intel PT packet stream, use the following function:
+
+ pt_qry_sync_set()
+
+
+After successfully synchronizing, the query decoder will start reading the PSB+
+header to initialize its internal state. If tracing is enabled at this
+synchronization point, the IP of the instruction, at which decoding should be
+started, is returned. If tracing is disabled at this synchronization point, it
+will be indicated in the returned status bits (see below). In this example,
+synchronization to the first synchronization point is shown:
+
+~~~{.c}
+ struct pt_query_decoder *decoder;
+ uint64_t ip;
+ int status;
+
+ status = pt_qry_sync_forward(decoder, &ip);
+ if (status < 0)
+ <handle error>(status);
+~~~
+
+In addition to a query decoder, you will need an instruction decoder for
+decoding and classifying instructions.
+
+
+#### In A Nutshell
+
+After synchronizing, you begin decoding instructions starting at the returned
+IP. As long as you can determine the next instruction in execution order, you
+continue on your own. Only when the next instruction cannot be determined by
+examining the current instruction, you would ask the query decoder for guidance:
+
+ * If the current instruction is a conditional branch, the
+ `pt_qry_cond_branch()` function will tell whether it was taken.
+
+ * If the current instruction is an indirect branch, the
+ `pt_qry_indirect_branch()` function will provide the IP of its destination.
+
+
+~~~{.c}
+ struct pt_query_decoder *decoder;
+ uint64_t ip;
+
+ for (;;) {
+ struct <instruction> insn;
+
+ insn = <decode instruction>(ip);
+
+ ip += <instruction size>(insn);
+
+ if (<is cond branch>(insn)) {
+ int status, taken;
+
+ status = pt_qry_cond_branch(decoder, &taken);
+ if (status < 0)
+ <handle error>(status);
+
+ if (taken)
+ ip += <branch displacement>(insn);
+ } else if (<is indirect branch>(insn)) {
+ int status;
+
+ status = pt_qry_indirect_branch(decoder, &ip);
+ if (status < 0)
+ <handle error>(status);
+ }
+ }
+~~~
+
+
+Certain aspects, such as asynchronous events or synchronizing at a location
+where tracing is disabled, have been ignored so far. Let us consider them now.
+
+
+#### Queries
+
+The query decoder provides four query functions:
+
+ * `pt_qry_cond_branch()` Query whether the next conditional branch was
+ taken.
+
+ * `pt_qry_indirect_branch()` Query for the destination IP of the next
+ indirect branch.
+
+ * `pt_qry_event()` Query for the next event.
+
+ * `pt_qry_time()` Query for the current time.
+
+
+Each function returns either a positive vector of status bits or a negative
+error code. For details on status bits and error conditions, please refer to
+the `pt_status_flag` and `pt_error_code` enumerations in the intel-pt.h header.
+
+The `pts_ip_suppressed` status bit is used to indicate that no IP is available
+at functions that are supposed to return an IP. Examples are the indirect
+branch query function and both synchronization functions.
+
+The `pts_event_pending` status bit is used to indicate that there is an event
+pending. You should query for this event before continuing execution flow
+reconstruction.
+
+The `pts_eos` status bit is used to indicate the end of the trace. Any
+subsequent query will return -pte_eos.
+
+
+#### Events
+
+Events are signaled ahead of time. When you query for pending events as soon as
+they are indicated, you will be aware of asynchronous events before you reach
+the instruction associated with the event.
+
+For example, if tracing is disabled at the synchronization point, the IP will be
+suppressed. In this case, it is very likely that a tracing enabled event is
+signaled. You will also get events for initializing the decoder state after
+synchronizing onto the Intel PT packet stream. For example, paging or execution
+mode events.
+
+See the `enum pt_event_type` and `struct pt_event` in the intel-pt.h header for
+details on possible events. This document does not give an example of event
+processing. Refer to the implementation of the instruction flow decoder in
+pt_insn.c for details.
+
+
+#### Timing
+
+To be able to signal events, the decoder reads ahead until it arrives at a query
+relevant packet. Errors encountered during that time will be postponed until
+the respective query call. This reading ahead affects timing. The decoder will
+always be a few packets ahead. When querying for the current time, the query
+will return the time at the decoder's current packet. This corresponds to the
+time at our next query.
+
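+A sketch of querying the current time; the argument list (a timestamp plus
+counters for lost MTC and CYC updates) follows the declaration of
+`pt_qry_time()` in the intel-pt.h header:
+
+~~~{.c}
+ uint64_t time;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ errcode = pt_qry_time(decoder, &time, &lost_mtc, &lost_cyc);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~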
+
+#### Return Compression
+
+If Intel PT has been configured to compress returns, a successfully compressed
+return is represented as a conditional branch instead of an indirect branch.
+For a RET instruction, you first query for a conditional branch. If the query
+succeeds, it should indicate that the branch was taken. In that case, the
+return has been compressed. A not taken branch indicates an error. If the
+query fails, the return has not been compressed and you query for an indirect
+branch.
+
+There is no guarantee that returns will be compressed. Even though return
+compression has been enabled, returns may still be represented as indirect
+branches.
+
+To reconstruct the execution flow for compressed returns, you would maintain a
+stack of return addresses. For each call instruction, push the IP of the
+instruction following the call onto the stack. For compressed returns, pop the
+topmost IP from the stack. See pt_retstack.h and pt_retstack.c for a sample
+implementation.
+
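+The following sketch shows this pattern for a RET instruction. The return
+stack operation is a placeholder in the style of the other examples, and
+`pte_bad_retcomp` is assumed to be the matching error constant from the
+intel-pt.h error enumeration:
+
+~~~{.c}
+ static int handle_ret(struct pt_query_decoder *decoder, uint64_t *ip)
+ {
+     int status, taken;
+
+     status = pt_qry_cond_branch(decoder, &taken);
+     if (status >= 0) {
+         /* The return was compressed; it must be indicated as taken. */
+         if (!taken)
+             return -pte_bad_retcomp;
+
+         *ip = <pop return stack>();
+         return status;
+     }
+
+     /* The return was not compressed; query for the destination IP. */
+     return pt_qry_indirect_branch(decoder, ip);
+ }
+~~~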
+
+## The Instruction Flow Layer
+
+The instruction flow layer provides a simple API for iterating over instructions
+in execution order. Start by configuring and allocating a `pt_insn_decoder` as
+shown below:
+
+~~~{.c}
+ struct pt_insn_decoder *decoder;
+ struct pt_config config;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.size = sizeof(config);
+ config.begin = <pt buffer begin>;
+ config.end = <pt buffer end>;
+ config.cpu = <cpu identifier>;
+ config.decode.callback = <decode function>;
+ config.decode.context = <decode context>;
+
+ decoder = pt_insn_alloc_decoder(&config);
+ if (!decoder)
+ <handle error>(errcode);
+~~~
+
+An optional packet decode callback function may be specified in addition to the
+mandatory config fields. If specified, the callback function will be called for
+packets the decoder does not know about. The decoder will ignore the unknown
+packet except for its size in order to skip it. If there is no decode callback
+specified, the decoder will abort with `-pte_bad_opc`. In addition to the
+callback function pointer, an optional pointer to user-defined context
+information can be specified. This context will be passed to the decode
+callback function.
+
+The image argument is optional. If no image is given, the decoder will use an
+empty default image that can be populated later on and that is implicitly
+destroyed when the decoder is freed. See below for more information on this.
+
+
+#### The Traced Image
+
+In addition to the Intel PT configuration, the instruction flow decoder needs to
+know the memory image for which Intel PT has been recorded. This memory image
+is represented by a `pt_image` object. If decoding failed due to an IP lying
+outside of the traced memory image, `pt_insn_next()` will return `-pte_nomap`.
+
+Use `pt_image_alloc()` to allocate and `pt_image_free()` to free an image.
+Images may not be shared. Every decoder must use a different image. Use this
+to prepare the image in advance or if you want to switch between images.
+
+Every decoder provides an empty default image that is used if no image is
+specified during allocation. The default image is implicitly destroyed when the
+decoder is freed. It can be obtained by calling `pt_insn_get_image()`. Use
+this if you only use one decoder and one image.
+
+An image is a collection of contiguous, non-overlapping memory regions called
+`sections`. Starting with an empty image, it may be populated with repeated
+calls to `pt_image_add_file()` or `pt_image_add_cached()`, one for each section,
+or with a call to `pt_image_copy()` to add all sections from another image. If
+a newly added section overlaps with an existing section, the existing section
+will be truncated or split to make room for the new section.
+
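+A minimal sketch of preparing an image from a single file section; the
+parameter order of `pt_image_add_file()` (file offset, size, optional
+address-space identifier, virtual address) is assumed from intel-pt.h and
+should be checked against the header or man page:
+
+~~~{.c}
+ struct pt_image *image;
+ int errcode;
+
+ image = pt_image_alloc("traced process");
+ if (!image)
+     <handle error>();
+
+ /* Map <size> bytes starting at file offset <offset> to virtual address
+  * <vaddr>; pass NULL to use the default address space.
+  */
+ errcode = pt_image_add_file(image, <filename>, <offset>, <size>,
+                             NULL, <vaddr>);
+ if (errcode < 0)
+     <handle error>(errcode);
+
+ errcode = pt_insn_set_image(decoder, image);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~
+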
+In some cases, the memory image may change during the execution. You can use
+the `pt_image_remove_by_filename()` function to remove previously added sections
+by their file name and `pt_image_remove_by_asid()` to remove all sections for an
+address-space.
+
+In addition to adding sections, you can register a callback function for reading
+memory using `pt_image_set_callback()`. The `context` parameter you pass
+together with the callback function pointer will be passed to your callback
+function every time it is called. There can only be one callback at any time.
+Adding a new callback will remove any previously added callback. To remove the
+callback function, pass `NULL` to `pt_image_set_callback()`.
+
+Callback and files may be combined. The callback function is used whenever
+the memory cannot be found in any of the image's sections.
+
+If more than one process is traced, the memory image may change when the process
+context is switched. To simplify handling this case, an address-space
+identifier may be passed to each of the above functions to define separate
+images for different processes at the same time. The decoder will select the
+correct image based on context switch information in the Intel PT trace. If
+you want to manage this on your own, you can use `pt_insn_set_image()` to
+replace the image a decoder uses.
+
+
+#### The Traced Image Section Cache
+
+When using multiple decoders that work on related memory images it is desirable
+to share image sections between decoders. The underlying file sections will be
+mapped only once per image section cache.
+
+Use `pt_iscache_alloc()` to allocate and `pt_iscache_free()` to free an image
+section cache. Freeing the cache does not destroy sections added to the cache.
+They remain valid until they are no longer used.
+
+Use `pt_iscache_add_file()` to add a file section to an image section cache.
+The function returns an image section identifier (ISID) that uniquely identifies
+the section in this cache. Use `pt_image_add_cached()` to add a file section
+from an image section cache to an image.
+
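+A sketch of the cache-based variant; the argument lists are assumed from
+intel-pt.h and should be checked against the header or man pages:
+
+~~~{.c}
+ struct pt_image_section_cache *iscache;
+ int isid, errcode;
+
+ iscache = pt_iscache_alloc("shared sections");
+ if (!iscache)
+     <handle error>();
+
+ /* Add the file section once; the returned ISID identifies it. */
+ isid = pt_iscache_add_file(iscache, <filename>, <offset>, <size>, <vaddr>);
+ if (isid < 0)
+     <handle error>(isid);
+
+ /* Add the cached section to each decoder's image. */
+ errcode = pt_image_add_cached(image, iscache, isid, NULL);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~
+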
+Multiple image section caches may be used at the same time but it is recommended
+not to mix sections from different image section caches in one image.
+
+A traced image section cache can also be used for reading an instruction's
+memory via its IP and ISID as provided in `struct pt_insn`.
+
+The image section cache provides a cache of recently mapped sections and keeps
+them mapped when they are unmapped by the images that used them. This avoids
+repeated unmapping and re-mapping of image sections in some parallel debug
+scenarios or when reading memory from the image section cache.
+
+Use `pt_iscache_set_limit()` to set the limit of this cache in bytes. This
+accounts for the extra memory that will be used for keeping image sections
+mapped including any block caches associated with image sections. To disable
+caching, set the limit to zero.
+
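+For example, to allow up to 8 MiB of cached sections (the value is only
+illustrative):
+
+~~~{.c}
+ errcode = pt_iscache_set_limit(iscache, 8ull << 20);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~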
+
+#### Synchronizing
+
+Before the decoder can be used, it needs to be synchronized onto the Intel PT
+packet stream. To iterate over synchronization points in the Intel PT packet
+stream in forward or backward directions, the instruction flow decoders offer
+the following two synchronization functions respectively:
+
+ pt_insn_sync_forward()
+ pt_insn_sync_backward()
+
+
+To manually synchronize the decoder at a synchronization point (i.e. PSB packet)
+in the Intel PT packet stream, use the following function:
+
+ pt_insn_sync_set()
+
+
+The example below shows synchronization to the first synchronization point:
+
+~~~{.c}
+ struct pt_insn_decoder *decoder;
+ int errcode;
+
+ errcode = pt_insn_sync_forward(decoder);
+ if (errcode < 0)
+ <handle error>(errcode);
+~~~
+
+The decoder will remember the last synchronization packet it decoded.
+Subsequent calls to `pt_insn_sync_forward` and `pt_insn_sync_backward` will use
+this as their starting point.
+
+You can get the current decoder position as offset into the Intel PT buffer via:
+
+ pt_insn_get_offset()
+
+
+You can get the position of the last synchronization point as offset into the
+Intel PT buffer via:
+
+ pt_insn_get_sync_offset()
+
+
+#### Iterating
+
+Once the decoder is synchronized, you can iterate over instructions in execution
+flow order by repeated calls to `pt_insn_next()` as shown in the following
+example:
+
+~~~{.c}
+ struct pt_insn_decoder *decoder;
+ int status;
+
+ for (;;) {
+ struct pt_insn insn;
+
+ status = pt_insn_next(decoder, &insn, sizeof(insn));
+
+ if (insn.iclass != ptic_error)
+ <process instruction>(&insn);
+
+ if (status < 0)
+ break;
+
+ ...
+ }
+~~~
+
+Note that the example ignores non-error status returns.
+
+For each instruction, you get its IP, its size in bytes, the raw memory, an
+identifier for the image section that contained it, the current execution mode,
+and the speculation state, that is whether the instruction has been executed
+speculatively. In addition, you get a coarse classification that can be used
+for further processing without the need for a full instruction decode.
+
+If a traced image section cache is used the image section identifier can be used
+to trace an instruction back to the binary file that contained it. This allows
+mapping the instruction back to source code using the debug information
+contained in or reachable via the binary file.
+
+Beware that `pt_insn_next()` may indicate errors that occur after the returned
+instruction. The returned instruction is valid if its `iclass` field is set.
+
+
+#### Events
+
+The instruction flow decoder uses an event system similar to the query
+decoder's. Pending events are indicated by the `pts_event_pending` flag in the
+status flag bit-vector returned from `pt_insn_sync_<where>()`, `pt_insn_next()`
+and `pt_insn_event()`.
+
+When the `pts_event_pending` flag is set on return from `pt_insn_next()`, use
+repeated calls to `pt_insn_event()` to drain all queued events. Then switch
+back to calling `pt_insn_next()` to resume with instruction flow decode as
+shown in the following example:
+
+~~~{.c}
+ struct pt_insn_decoder *decoder;
+ int status;
+
+ for (;;) {
+ struct pt_insn insn;
+
+ status = pt_insn_next(decoder, &insn, sizeof(insn));
+ if (status < 0)
+ break;
+
+ <process instruction>(&insn);
+
+ while (status & pts_event_pending) {
+ struct pt_event event;
+
+ status = pt_insn_event(decoder, &event, sizeof(event));
+ if (status < 0)
+ <handle error>(status);
+
+ <process event>(&event);
+ }
+ }
+~~~
+
+
+#### The Instruction Flow Decode Loop
+
+If we put all of the above examples together, we end up with a decode loop as
+shown below:
+
+~~~{.c}
+ int handle_events(struct pt_insn_decoder *decoder, int status)
+ {
+ while (status & pts_event_pending) {
+ struct pt_event event;
+
+ status = pt_insn_event(decoder, &event, sizeof(event));
+ if (status < 0)
+ break;
+
+ <process event>(&event);
+ }
+
+ return status;
+ }
+
+ int decode(struct pt_insn_decoder *decoder)
+ {
+ int status;
+
+ for (;;) {
+ status = pt_insn_sync_forward(decoder);
+ if (status < 0)
+ break;
+
+ for (;;) {
+ struct pt_insn insn;
+
+ status = handle_events(decoder, status);
+ if (status < 0)
+ break;
+
+ status = pt_insn_next(decoder, &insn, sizeof(insn));
+
+ if (insn.iclass != ptic_error)
+ <process instruction>(&insn);
+
+ if (status < 0)
+ break;
+ }
+
+ <handle error>(status);
+ }
+
+ <handle error>(status);
+
+ return status;
+ }
+~~~
+
+
+## The Block Layer
+
+The block layer provides a simple API for iterating over blocks of sequential
+instructions in execution order. The instructions in a block are sequential in
+the sense that no trace is required for reconstructing the instructions. The IP
+of the first instruction is given in `struct pt_block` and the IP of other
+instructions in the block can be determined by decoding and examining the
+previous instruction.
+
+Start by configuring and allocating a `pt_block_decoder` as shown below:
+
+~~~{.c}
+ struct pt_block_decoder *decoder;
+ struct pt_config config;
+
+ memset(&config, 0, sizeof(config));
+ config.size = sizeof(config);
+ config.begin = <pt buffer begin>;
+ config.end = <pt buffer end>;
+ config.cpu = <cpu identifier>;
+ config.decode.callback = <decode function>;
+ config.decode.context = <decode context>;
+
+ decoder = pt_blk_alloc_decoder(&config);
+~~~
+
+An optional packet decode callback function may be specified in addition to the
+mandatory config fields. If specified, the callback function will be called for
+packets the decoder does not know about. The decoder will ignore the unknown
+packet except for its size in order to skip it. If there is no decode callback
+specified, the decoder will abort with `-pte_bad_opc`. In addition to the
+callback function pointer, an optional pointer to user-defined context
+information can be specified. This context will be passed to the decode
+callback function.
+
+
+#### Synchronizing
+
+Before the decoder can be used, it needs to be synchronized onto the Intel PT
+packet stream. To iterate over synchronization points in the Intel PT packet
+stream in forward or backward directions, the block decoder offers the following
+two synchronization functions respectively:
+
+ pt_blk_sync_forward()
+ pt_blk_sync_backward()
+
+
+To manually synchronize the decoder at a synchronization point (i.e. PSB packet)
+in the Intel PT packet stream, use the following function:
+
+ pt_blk_sync_set()
+
+
+The example below shows synchronization to the first synchronization point:
+
+~~~{.c}
+ struct pt_block_decoder *decoder;
+ int errcode;
+
+ errcode = pt_blk_sync_forward(decoder);
+ if (errcode < 0)
+ <handle error>(errcode);
+~~~
+
+The decoder will remember the last synchronization packet it decoded.
+Subsequent calls to `pt_blk_sync_forward` and `pt_blk_sync_backward` will use
+this as their starting point.
+
+You can get the current decoder position as offset into the Intel PT buffer via:
+
+ pt_blk_get_offset()
+
+
+You can get the position of the last synchronization point as offset into the
+Intel PT buffer via:
+
+ pt_blk_get_sync_offset()
+
+
+#### Iterating
+
+Once the decoder is synchronized, it can be used to iterate over blocks of
+instructions in execution flow order by repeated calls to `pt_blk_next()` as
+shown in the following example:
+
+~~~{.c}
+ struct pt_block_decoder *decoder;
+ int status;
+
+ for (;;) {
+ struct pt_block block;
+
+ status = pt_blk_next(decoder, &block, sizeof(block));
+
+ if (block.ninsn > 0)
+ <process block>(&block);
+
+ if (status < 0)
+ break;
+
+ ...
+ }
+~~~
+
+Note that the example ignores non-error status returns.
+
+A block contains enough information to reconstruct the instructions. See
+`struct pt_block` in `intel-pt.h` for details. Note that errors returned by
+`pt_blk_next()` apply after the last instruction in the provided block.
+
+It is recommended to use a traced image section cache so the image section
+identifier contained in a block can be used for reading the memory containing
+the instructions in the block. This also allows mapping the instructions back
+to source code using the debug information contained in or reachable via the
+binary file.
+
+In some cases, the last instruction in a block may cross image section
+boundaries. This can happen when a code segment is split into more than one
+image section. The block is marked truncated in this case and provides the raw
+bytes of the last instruction.
+
+The following example shows how instructions can be reconstructed from a block:
+
+~~~{.c}
+ struct pt_image_section_cache *iscache;
+ struct pt_block *block;
+ uint16_t ninsn;
+ uint64_t ip;
+
+ ip = block->ip;
+ for (ninsn = 0; ninsn < block->ninsn; ++ninsn) {
+ uint8_t raw[pt_max_insn_size];
+ <struct insn> insn;
+ int size, errcode;
+
+ if (block->truncated && ((ninsn +1) == block->ninsn)) {
+ memcpy(raw, block->raw, block->size);
+ size = block->size;
+ } else {
+ size = pt_iscache_read(iscache, raw, sizeof(raw), block->isid, ip);
+ if (size < 0)
+ break;
+ }
+
+ errcode = <decode instruction>(&insn, raw, size, block->mode);
+ if (errcode < 0)
+ break;
+
+ <process instruction>(&insn);
+
+ ip = <determine next ip>(&insn);
+ }
+~~~
+
+
+#### Events
+
+The block decoder uses an event system similar to the query decoder's. Pending
+events are indicated by the `pts_event_pending` flag in the status flag
+bit-vector returned from `pt_blk_sync_<where>()`, `pt_blk_next()` and
+`pt_blk_event()`.
+
+When the `pts_event_pending` flag is set on return from `pt_blk_sync_<where>()`
+or `pt_blk_next()`, use repeated calls to `pt_blk_event()` to drain all queued
+events. Then switch back to calling `pt_blk_next()` to resume with block decode
+as shown in the following example:
+
+~~~{.c}
+ struct pt_block_decoder *decoder;
+ int status;
+
+ for (;;) {
+ struct pt_block block;
+
+ status = pt_blk_next(decoder, &block, sizeof(block));
+ if (status < 0)
+ break;
+
+ <process block>(&block);
+
+ while (status & pts_event_pending) {
+ struct pt_event event;
+
+ status = pt_blk_event(decoder, &event, sizeof(event));
+ if (status < 0)
+ <handle error>(status);
+
+ <process event>(&event);
+ }
+ }
+~~~
+
+
+#### The Block Decode Loop
+
+If we put all of the above examples together, we end up with a decode loop as
+shown below:
+
+~~~{.c}
+ int process_block(struct pt_block *block,
+ struct pt_image_section_cache *iscache)
+ {
+ uint16_t ninsn;
+ uint64_t ip;
+
+ ip = block->ip;
+ for (ninsn = 0; ninsn < block->ninsn; ++ninsn) {
+ struct pt_insn insn;
+
+ memset(&insn, 0, sizeof(insn));
+ insn->speculative = block->speculative;
+ insn->isid = block->isid;
+ insn->mode = block->mode;
+ insn->ip = ip;
+
+ if (block->truncated && ((ninsn +1) == block->ninsn)) {
+ insn.truncated = 1;
+ insn.size = block->size;
+
+ memcpy(insn.raw, block->raw, insn.size);
+ } else {
+ int size;
+
+ size = pt_iscache_read(iscache, insn.raw, sizeof(insn.raw),
+ insn.isid, insn.ip);
+ if (size < 0)
+ return size;
+
+ insn.size = (uint8_t) size;
+ }
+
+ <decode instruction>(&insn);
+ <process instruction>(&insn);
+
+ ip = <determine next ip>(&insn);
+ }
+
+ return 0;
+ }
+
+ int handle_events(struct pt_blk_decoder *decoder, int status)
+ {
+ while (status & pts_event_pending) {
+ struct pt_event event;
+
+ status = pt_blk_event(decoder, &event, sizeof(event));
+ if (status < 0)
+ break;
+
+ <process event>(&event);
+ }
+
+ return status;
+ }
+
+ int decode(struct pt_blk_decoder *decoder,
+ struct pt_image_section_cache *iscache)
+ {
+ int status;
+
+ for (;;) {
+ status = pt_blk_sync_forward(decoder);
+ if (status < 0)
+ break;
+
+ for (;;) {
+ struct pt_block block;
+ int errcode;
+
+ status = handle_events(decoder, status);
+ if (status < 0)
+ break;
+
+ status = pt_blk_next(decoder, &block, sizeof(block));
+
+ errcode = process_block(&block, iscache);
+ if (errcode < 0)
+ status = errcode;
+
+ if (status < 0)
+ break;
+ }
+
+ <handle error>(status);
+ }
+
+ <handle error>(status);
+
+ return status;
+ }
+~~~
+
+
+## Parallel Decode
+
+Intel PT splits naturally into self-contained PSB segments that can be decoded
+independently. Use the packet or query decoder to search for PSBs using
+repeated calls to `pt_pkt_sync_forward()` and `pt_pkt_get_sync_offset()` (or
+`pt_qry_sync_forward()` and `pt_qry_get_sync_offset()`). The following example
+shows this using the query decoder, which will already give the IP needed in
+the next step.
+
+~~~{.c}
+ struct pt_query_decoder *decoder;
+ uint64_t offset, ip;
+ int status, errcode;
+
+ for (;;) {
+ status = pt_qry_sync_forward(decoder, &ip);
+ if (status < 0)
+ break;
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ if (errcode < 0)
+ <handle error>(errcode);
+
+ <split trace>(offset, ip, status);
+ }
+~~~
+
+The individual trace segments can then be decoded using the query, instruction
+flow, or block decoder as shown above in the previous examples.
+
+When stitching decoded trace segments together, a sequence of linear (in the
+sense that it can be decoded without Intel PT) code has to be filled in. Use
+the `pts_eos` status indication to stop decoding early enough. Then proceed
+until the IP at the start of the succeeding trace segment is reached. When
+using the instruction flow decoder, `pt_insn_next()` may be used for that as
+shown in the following example:
+
+~~~{.c}
+ struct pt_insn_decoder *decoder;
+ struct pt_insn insn;
+ int status;
+
+ for (;;) {
+ status = pt_insn_next(decoder, &insn, sizeof(insn));
+ if (status < 0)
+ <handle error>(status);
+
+ if (status & pts_eos)
+ break;
+
+ <process instruction>(&insn);
+ }
+
+ while (insn.ip != <next segment's start IP>) {
+ <process instruction>(&insn);
+
+ status = pt_insn_next(decoder, &insn, sizeof(insn));
+ if (status < 0)
+ <handle error>(status);
+ }
+~~~
+
+
+## Threading
+
+The decoder library API is not thread-safe. Different threads may allocate and
+use different decoder objects at the same time. Different decoders must not use
+the same image object. Use `pt_image_copy()` to give each decoder its own copy
+of a shared master image.
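+
+A minimal sketch of that approach, assuming a previously prepared `master`
+image and the argument order `pt_image_copy(destination, source)`:
+
+~~~{.c}
+ struct pt_image *copy;
+ int errcode;
+
+ copy = pt_image_alloc("copy");
+ if (!copy)
+     <handle error>();
+
+ /* Populate the copy with the sections of the shared master image. */
+ errcode = pt_image_copy(copy, master);
+ if (errcode < 0)
+     <handle error>(errcode);
+
+ /* Give this decoder its own image. */
+ errcode = pt_insn_set_image(decoder, copy);
+ if (errcode < 0)
+     <handle error>(errcode);
+~~~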
diff --git a/doc/howto_pttc.md b/doc/howto_pttc.md
new file mode 100755
index 000000000000..02cb597bc210
--- /dev/null
+++ b/doc/howto_pttc.md
@@ -0,0 +1,482 @@
+Testing the Intel(R) Processor Trace (Intel PT) Decoder Library and Samples {#pttc}
+===========================================================================
+
+<!---
+ ! Copyright (c) 2013-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+This chapter documents how to use the pttc tool to generate and run tests.
+Pttc takes a yasm assembly file and creates a Processor Trace stream from
+special directives in its input.
+
+
+Usage
+-----
+
+ $ pttc path/to/file.ptt
+
+If no error occurs, the following files will be generated in the current working
+directory:
+
+ file.lst
+ file.bin
+ file.pt
+ file-<tool>.exp
+ file-<src>.sb
+
+The `.lst` and `.bin` files are generated by a call to yasm. The `.pt` file
+contains the Processor Trace and the `.exp` files contain the content of the
+comments after the `.exp` directive for tool `<tool>` (see below). The `.sb`
+files contain sideband information from source `<src>` (see below).
+
+Pttc prints the filenames of the generated `.exp` and `.sb` files to stdout.
+
+
+Syntax
+------
+
+Pttc allows annotations in the comments of yasm assembler source files. The
+parser recognizes all comments that contain the `@pt` directive marker.
+
+Every pt directive can be preceded by a label name followed by a colon (`:`).
+Refer to the description of the `.exp()` and `.sb()` directives below on how to
+use these labels.
+
+The general syntax for pt directives is as follows:
+
+ @pt [label:]directive([arguments])
+
+
+### Intel PT directives
+
+This section lists the directives that are understood by pttc.
+
+
+#### psb, psbend, pad, ovf, stop
+
+ @pt psb()
+ @pt psbend()
+ @pt pad()
+ @pt ovf()
+ @pt stop()
+
+These packets do not have any arguments and correspond to the packets from the
+specification.
+
+
+#### tnt, tnt64
+
+ @pt tnt(args)
+ @pt tnt64(args)
+
+The argument of the tnt and tnt64 directives is a list of takens `t` and
+not-takens `n`. For better readability, an arbitrary number of blanks and dots
+can be interspersed.
+
+It is an error if the payload is empty, contains only blanks or dots, or
+contains any other characters. Additionally, it is an error to have more than 6
+t's or n's in a TNT payload or more than 47 in a TNT64 payload.
+
+
+#### tip, tip.pge, tip.pgd, fup
+
+ @pt tip(ipc: addr)
+ @pt tip.pge(ipc: addr)
+ @pt tip.pgd(ipc: addr)
+ @pt fup(ipc: addr)
+
+These packets accept arbitrary addresses. `Addr` must be a parsable integer or a
+valid label name. `Ipc` specifies the IP compression bits as an integer.
+
+If `addr` is given as a label, the address is truncated according to the IP
+bytes value given in `ipc`. Otherwise the address needs to be a zero-extended
+integer no bigger than specified in `ipc`.
+
+
+#### mode.exec, mode.tsx
+
+ @pt mode.exec(mode)
+ @pt mode.tsx(state)
+
+`Mode` must be either `16bit` or `32bit` or `64bit`; `state` must be `begin` or
+`abort` or `commit`.
+
+
+#### pip
+
+ @pt pip(addr[, nr])
+
+Addr is the value that was written to CR3.
+
+If nr is specified after addr, the non-root bit is set.
+
+
+#### tsc
+
+ @pt tsc(value)
+
+Value is the timestamp.
+
+
+#### cbr
+
+ @pt cbr(value)
+
+Value is the core/bus ratio.
+
+
+#### tma
+
+ @pt tma(ctc, fc)
+
+Ctc is the 16bit crystal clock component.
+Fc is the 9bit fast counter component.
+
+
+#### mtc
+
+ @pt mtc(value)
+
+Value is the 8bit crystal clock component.
+
+
+#### cyc
+
+ @pt cyc(value)
+
+Value is the cycle count.
+
+
+#### vmcs
+
+ @pt vmcs(value)
+
+Value is the VMCS base address. Beware that only bits 12 to 51 will be used.
+The rest will be silently ignored.
+
+
+#### mnt
+
+ @pt mnt(value)
+
+Value is the 8-byte packet payload represented as 64-bit little-endian number.
+
+
+#### exstop
+
+ @pt exstop([ip])
+
+If ip is specified, the IP bit in the packet opcode is set; otherwise it is
+clear.
+
+
+#### mwait
+
+ @pt mwait(hints, ext)
+
+Hints is the 4-byte mwait hints argument in eax. Ext is the 4-byte extensions
+argument in ecx.
+
+
+#### pwre
+
+ @pt pwre(c-state[, hw])
+
+C-state is a thread C-state with optional sub C-state in the format
+`c<state>[.<sub>]` where both `<state>` and `<sub>` are decimal integer values
+between 0 and 15. If the sub C-state is not specified, it defaults to c0.
+
+If hw is specified, the C-state entry was initiated by hardware.
+
+
+#### pwrx
+
+ @pt pwrx(wr: last, deepest)
+
+Wr is the wake reason. It must be `int`, `st`, or `hw`.
+
+Last and deepest are the last and deepest achieved core C-state in the format
+`c<state>` where `<state>` is a decimal integer value between 0 and 15.
+
+
+#### ptw
+
+ @pt ptw(size: payload[, ip])
+
+Size is the payload size; it must be 0 or 1. Payload is the unsigned integer
+payload. If ip is specified, the IP bit in the packet opcode is set; otherwise
+it is clear.
+
+
+#### .exp
+
+ @pt .exp([tool])
+
+Every occurrence of this directive prints all the lines following it to
+`file[-tool].exp`.
+
+The first occurrence of this directive stops processing of other directives.
+
+In order to have a valid yasm file, it is necessary to put the expected output
+into yasm comments (with the semi-colon character (`;`)). Any character up to
+(and including) the semi-colon is not printed to the `.exp` file. Trailing white
+space is removed from each line.
+
+Comments are made with the `#` character and go to the end of line. Comments
+and whitespace before comments are not printed in the `.exp` file.
+
+Each line that contains no yasm comment at all is not printed to the exp file.
+Empty lines can be used to structure the expected output text.
+
+In `.exp` files and in sideband directives, the address of a yasm label can be
+substituted using:
+
+ %[?0]label[.<number>].
+
+
+Labels are prefixed with `%`, for example, `%%label`. A label name can consist
+of alphanumeric characters and underscores. Labels must be unique. The address
+of label will be substituted with a hex number including leading `0x`.
+
+Prefixing the label with `0`, for example `%0label`, prints the address with
+leading zeroes using 16 hex digits plus the leading `0x`.
+
+The least significant `n` bytes of an address can be masked by appending `.n` to
+the label. For example, `%%label.2` with `label` = `0xffffff004c` is printed as
+`0x4c`.
+
+Prefixing the label with `?` in combination with masking replaces the masked out
+parts with `?` using 16 digits for the address plus the leading `0x`. The
+remaining number is zero extended. For example, `%?label.2` with `label` =
+`0xc0001` is printed as `0x????????????0001`.
+
+The packet number of pt directives can also be substituted in the output. These
+numbers are printed in decimal. The syntax is as follows:
+
+ %label
+
+
+### Special Labels
+
+There is a special label for the byte offset after the last packet: `%%eos`.
+
+
+Labels in sections are relative to the section's vstart address. PTTC also adds
+the following special section labels:
+
+ * *section_<name>_start* gives the section's offset in the binary file
+ * *section_<name>_vstart* gives the virtual base address of the mapped section
+ * *section_<name>_length* gives the size of the section in bytes
+
+Beware that PTTC does not support switching back and forth between sections.
+
+
+### Sideband Directives
+
+This section lists the sideband directives that are understood by pttc.
+
+
+#### primary/secondary [requires SIDEBAND]
+
+ @sb primary(format [,src])
+ @sb secondary(format [,src])
+
+Every occurrence of this directive switches the current sideband file to
+`file[-src]-format-primary.sb` or `file[-src]-format-secondary.sb` respectively.
+Every subsequent sideband directive will write to the current sideband file.
+
+A primary sideband file is directly related to the trace. For example, it may
+contain the sideband information for the traced cpu. A secondary sideband file
+is indirectly related to the trace. For example, it may contain the sideband
+information for other cpus on the system.
+
+Sideband directives and Intel PT directives can be mixed.
+
+
+#### raw [requires SIDEBAND]
+
+ @sb raw-8(value)
+ @sb raw-16(value)
+ @sb raw-32(value)
+ @sb raw-64(value)
+
+Writes a raw unsigned 8-, 16-, 32-, or 64-bit value into the current sideband
+stream.
+
+
+#### pevent-sample_type [requires SIDEBAND, PEVENT]
+
+ @sb pevent-sample_type(t1[, t2[, t3[...]]])
+
+Sets the perf_event sample_type for subsequent pevent sideband directives for
+the current sideband file to the bit-wise OR (`|`) of all arguments. Each
+argument can be:
+
+ * *tid* representing PERF_SAMPLE_TID
+ * *time* representing PERF_SAMPLE_TIME
+ * *id* representing PERF_SAMPLE_ID
+ * *cpu* representing PERF_SAMPLE_CPU
+ * *stream* representing PERF_SAMPLE_STREAM_ID
+ * *identifier* representing PERF_SAMPLE_IDENTIFIER
+ * a 64bit unsigned integer representing a bit-mask of
+ enum perf_event_sample_format values
+
+
+Subsequent perf event record generating directives must provide the specified
+number of sample arguments in the above order. The `tid` sample type takes
+two arguments: a pid followed by a tid.
+
+This directive may only be used before the first perf event record generating
+directive.
+
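+For example, the following sketch (all values are illustrative) selects `tid`
+and `time` samples and then appends them, in that order, to a subsequent
+record directive:
+
+    @sb pevent-sample_type(tid, time)
+    @sb pevent-itrace-start(1, 2, 1, 2, 1000)
+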
+
+#### pevent-mmap [requires SIDEBAND, PEVENT]
+
+ @sb pevent-mmap(pid, tid, addr, len, pgoff, filename[, samples])
+
+Writes a PERF_RECORD_MMAP event into the current sideband stream describing the
+mapping of filename.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-mmap-section [requires SIDEBAND, PEVENT]
+
+ @sb pevent-mmap-section(name, pid, tid[, samples])
+
+Writes a PERF_RECORD_MMAP event into the current sideband stream describing the
+mapping of section `name` to its vstart address from its start address in the
+test binary.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-lost [requires SIDEBAND, PEVENT]
+
+ @sb pevent-lost(id, lost[, samples])
+
+Writes a PERF_RECORD_LOST event into the current sideband stream describing the
+loss of perf_event records.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-comm [requires SIDEBAND, PEVENT]
+
+ @sb pevent-comm(pid, tid, comm[, samples])
+ @sb pevent-comm.exec(pid, tid, comm[, samples])
+
+Writes a PERF_RECORD_COMM event into the current sideband stream describing the
+command that is being traced.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-exit [requires SIDEBAND, PEVENT]
+
+ @sb pevent-exit(pid, ppid, tid, ptid, time[, samples])
+
+Writes a PERF_RECORD_EXIT event into the current sideband stream describing the
+exiting of the current thread. The thread is still running in kernel space but
+won't return to user space.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-fork [requires SIDEBAND, PEVENT]
+
+ @sb pevent-fork(pid, ppid, tid, ptid, time[, samples])
+
+Writes a PERF_RECORD_FORK event into the current sideband stream describing the
+creation of a new thread or process. The event occurs in the context of the
+parent thread.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-aux [requires SIDEBAND, PEVENT]
+
+ @sb pevent-aux(offset, size, flags[, samples])
+
+Writes a PERF_RECORD_AUX event into the current sideband stream describing that
+new data landed in the aux buffer.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-itrace-start [requires SIDEBAND, PEVENT]
+
+ @sb pevent-itrace-start(pid, tid[, samples])
+
+Writes a PERF_RECORD_ITRACE_START event into the current sideband stream
+describing that instruction tracing has started.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-lost-samples [requires SIDEBAND, PEVENT]
+
+ @sb pevent-lost-samples(lost[, samples])
+
+Writes a PERF_RECORD_LOST_SAMPLES event into the current sideband stream
+describing a loss of sample records.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-switch [requires SIDEBAND, PEVENT]
+
+ @sb pevent-switch.in([samples])
+ @sb pevent-switch.out([samples])
+
+Writes a PERF_RECORD_SWITCH event into the current sideband stream describing a
+switch into or out of context.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
+
+
+#### pevent-switch-cpu-wide [requires SIDEBAND, PEVENT]
+
+ @sb pevent-switch-cpu-wide.in(pid, tid[, samples])
+ @sb pevent-switch-cpu-wide.out(pid, tid[, samples])
+
+Writes a PERF_RECORD_SWITCH_CPU_WIDE event into the current sideband stream
+describing a switch into or out of context. The `pid` and `tid` arguments give
+the process and thread id of the previous task.
+
+The `samples` argument is a comma-separated list of samples corresponding to the
+pevent-sample_type configuration.
diff --git a/doc/man/CMakeLists.txt b/doc/man/CMakeLists.txt
new file mode 100644
index 000000000000..050c43155dd3
--- /dev/null
+++ b/doc/man/CMakeLists.txt
@@ -0,0 +1,147 @@
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+file(MAKE_DIRECTORY ${MAN_OUTPUT_DIRECTORY}/man3)
+
+find_program(PANDOC pandoc
+ DOC "Path to pandoc; used for building man pages."
+)
+
+function(add_man_page filename section function)
+ set(input ${CMAKE_CURRENT_SOURCE_DIR}/${filename})
+ set(output ${MAN_OUTPUT_DIRECTORY}/man${section}/${function}.${section})
+
+ add_custom_command(
+ OUTPUT ${output}
+ COMMAND ${PANDOC} -s -f markdown -t man -o ${output} ${input}
+ MAIN_DEPENDENCY ${filename}
+ )
+endfunction(add_man_page)
+
+function(install_man_page section function)
+ install(
+ FILES ${MAN_OUTPUT_DIRECTORY}/man${section}/${function}.${section}
+ DESTINATION ${CMAKE_INSTALL_MANDIR}/man${section}
+ )
+endfunction(install_man_page)
+
+function(add_man_page_alias section function alias)
+ set(output ${MAN_OUTPUT_DIRECTORY}/man${section}/${alias}.${section})
+
+ file(WRITE ${output} ".so man${section}/${function}.${section}\n")
+
+ install_man_page(${section} ${alias})
+endfunction(add_man_page_alias)
+
+set(MAN3_FUNCTIONS
+ pt_library_version
+ pt_config
+ pt_packet
+ pt_alloc_encoder
+ pt_enc_get_offset
+ pt_enc_get_config
+ pt_pkt_alloc_decoder
+ pt_pkt_sync_forward
+ pt_pkt_get_offset
+ pt_qry_alloc_decoder
+ pt_qry_sync_forward
+ pt_qry_get_offset
+ pt_qry_cond_branch
+ pt_qry_event
+ pt_qry_time
+ pt_image_alloc
+ pt_image_add_file
+ pt_image_remove_by_filename
+ pt_image_set_callback
+ pt_insn_alloc_decoder
+ pt_insn_sync_forward
+ pt_insn_get_offset
+ pt_insn_get_image
+ pt_insn_next
+ pt_iscache_alloc
+ pt_iscache_add_file
+ pt_iscache_read
+ pt_iscache_set_limit
+ pt_blk_alloc_decoder
+ pt_blk_sync_forward
+ pt_blk_get_offset
+ pt_blk_next
+)
+
+foreach (function ${MAN3_FUNCTIONS})
+ set(MAN_PAGES ${MAN_PAGES} ${MAN_OUTPUT_DIRECTORY}/man3/${function}.3)
+
+ add_man_page(${function}.3.md 3 ${function})
+ install_man_page(3 ${function})
+endforeach ()
+
+add_man_page_alias(3 pt_config pt_cpu_errata)
+add_man_page_alias(3 pt_packet pt_enc_next)
+add_man_page_alias(3 pt_packet pt_pkt_next)
+add_man_page_alias(3 pt_alloc_encoder pt_free_encoder)
+add_man_page_alias(3 pt_enc_get_offset pt_enc_sync_set)
+add_man_page_alias(3 pt_enc_get_config pt_pkt_get_config)
+add_man_page_alias(3 pt_enc_get_config pt_qry_get_config)
+add_man_page_alias(3 pt_enc_get_config pt_insn_get_config)
+add_man_page_alias(3 pt_enc_get_config pt_blk_get_config)
+add_man_page_alias(3 pt_pkt_alloc_decoder pt_pkt_free_decoder)
+add_man_page_alias(3 pt_pkt_sync_forward pt_pkt_sync_backward)
+add_man_page_alias(3 pt_pkt_sync_forward pt_pkt_sync_set)
+add_man_page_alias(3 pt_pkt_get_offset pt_pkt_get_sync_offset)
+add_man_page_alias(3 pt_qry_alloc_decoder pt_qry_free_decoder)
+add_man_page_alias(3 pt_qry_sync_forward pt_qry_sync_backward)
+add_man_page_alias(3 pt_qry_sync_forward pt_qry_sync_set)
+add_man_page_alias(3 pt_qry_get_offset pt_qry_get_sync_offset)
+add_man_page_alias(3 pt_qry_cond_branch pt_qry_indirect_branch)
+add_man_page_alias(3 pt_qry_time pt_qry_core_bus_ratio)
+add_man_page_alias(3 pt_qry_time pt_insn_time)
+add_man_page_alias(3 pt_qry_time pt_insn_core_bus_ratio)
+add_man_page_alias(3 pt_qry_time pt_blk_time)
+add_man_page_alias(3 pt_qry_time pt_blk_core_bus_ratio)
+add_man_page_alias(3 pt_qry_event pt_insn_event)
+add_man_page_alias(3 pt_qry_event pt_blk_event)
+add_man_page_alias(3 pt_image_alloc pt_image_free)
+add_man_page_alias(3 pt_image_alloc pt_image_name)
+add_man_page_alias(3 pt_image_add_file pt_image_copy)
+add_man_page_alias(3 pt_image_add_file pt_image_add_cached)
+add_man_page_alias(3 pt_image_remove_by_filename pt_image_remove_by_asid)
+add_man_page_alias(3 pt_insn_alloc_decoder pt_insn_free_decoder)
+add_man_page_alias(3 pt_insn_sync_forward pt_insn_sync_backward)
+add_man_page_alias(3 pt_insn_sync_forward pt_insn_sync_set)
+add_man_page_alias(3 pt_insn_get_offset pt_insn_get_sync_offset)
+add_man_page_alias(3 pt_insn_get_image pt_insn_set_image)
+add_man_page_alias(3 pt_insn_get_image pt_blk_get_image)
+add_man_page_alias(3 pt_insn_get_image pt_blk_set_image)
+add_man_page_alias(3 pt_insn_next pt_insn)
+add_man_page_alias(3 pt_iscache_alloc pt_iscache_free)
+add_man_page_alias(3 pt_iscache_alloc pt_iscache_name)
+add_man_page_alias(3 pt_blk_alloc_decoder pt_blk_free_decoder)
+add_man_page_alias(3 pt_blk_sync_forward pt_blk_sync_backward)
+add_man_page_alias(3 pt_blk_sync_forward pt_blk_sync_set)
+add_man_page_alias(3 pt_blk_get_offset pt_blk_get_sync_offset)
+add_man_page_alias(3 pt_blk_next pt_block)
+
+add_custom_target(man ALL DEPENDS ${MAN_PAGES})
diff --git a/doc/man/pt_alloc_encoder.3.md b/doc/man/pt_alloc_encoder.3.md
new file mode 100644
index 000000000000..6eb9ba5ecc3a
--- /dev/null
+++ b/doc/man/pt_alloc_encoder.3.md
@@ -0,0 +1,96 @@
+% PT_ALLOC_ENCODER(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_alloc_encoder, pt_free_encoder - allocate/free an Intel(R) Processor Trace
+packet encoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_packet_encoder \***
+| **pt_alloc_encoder(const struct pt_config \**config*);**
+|
+| **void pt_free_encoder(struct pt_packet_encoder \**encoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_alloc_encoder**() allocates a new Intel Processor Trace (Intel PT) packet
+encoder and returns a pointer to it. The packet encoder generates Intel PT
+trace from *pt_packet* objects. See **pt_enc_next**(3).
+
+The *config* argument points to a *pt_config* object. See **pt_config**(3).
+The *config* argument will not be referenced by the returned encoder but the
+trace buffer defined by the *config* argument's *begin* and *end* fields will.
+
+The returned packet encoder is initially synchronized onto the beginning of the
+trace buffer specified in its *config* argument. Use **pt_enc_sync_set**(3) to
+move it to any other position inside the trace buffer.
+
+**pt_free_encoder**() frees the Intel PT packet encoder pointed to by
+*encoder*. The *encoder* argument must be NULL or point to an encoder that has
+been allocated by a call to **pt_alloc_encoder**().
+
+
+# RETURN VALUE
+
+**pt_alloc_encoder**() returns a pointer to a *pt_packet_encoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_packet_encoder *encoder;
+	int errcode;
+
+ encoder = pt_alloc_encoder(config);
+ if (!encoder)
+ return pte_nomem;
+
+ errcode = bar(encoder);
+
+ pt_free_encoder(encoder);
+ return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_enc_sync_set**(3), **pt_enc_get_offset**(3),
+**pt_enc_get_config**(3), **pt_enc_next**(3)
diff --git a/doc/man/pt_blk_alloc_decoder.3.md b/doc/man/pt_blk_alloc_decoder.3.md
new file mode 100644
index 000000000000..ec4c2c22febd
--- /dev/null
+++ b/doc/man/pt_blk_alloc_decoder.3.md
@@ -0,0 +1,98 @@
+% PT_BLK_ALLOC_DECODER(3)
+
+<!---
+ ! Copyright (c) 2016-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_blk_alloc_decoder, pt_blk_free_decoder - allocate/free an Intel(R) Processor
+Trace block decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_block_decoder \***
+| **pt_blk_alloc_decoder(const struct pt_config \**config*);**
+|
+| **void pt_blk_free_decoder(struct pt_block_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+A block decoder decodes raw Intel Processor Trace (Intel PT) into a sequence of
+blocks of instructions described by the *pt_block* structure. See
+**pt_blk_next**(3).
+
+**pt_blk_alloc_decoder**() allocates a new block decoder and returns a pointer
+to it. The *config* argument points to a *pt_config* object. See
+**pt_config**(3). The *config* argument will not be referenced by the returned
+decoder but the trace buffer defined by the *config* argument's *begin* and
+*end* fields will.
+
+The returned block decoder needs to be synchronized onto the trace stream before
+it can be used. To synchronize the decoder, use **pt_blk_sync_forward**(3),
+**pt_blk_sync_backward**(3), or **pt_blk_sync_set**(3).
+
+**pt_blk_free_decoder**() frees the Intel PT block decoder pointed to by
+*decoder*. The *decoder* argument must be NULL or point to a decoder that has
+been allocated by a call to **pt_blk_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_blk_alloc_decoder**() returns a pointer to a *pt_block_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+	struct pt_block_decoder *decoder;
+	int errcode;
+
+	decoder = pt_blk_alloc_decoder(config);
+	if (!decoder)
+		return pte_nomem;
+
+	errcode = decode(decoder);
+
+	pt_blk_free_decoder(decoder);
+	return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3),
+**pt_blk_sync_set**(3), **pt_blk_get_offset**(3), **pt_blk_get_sync_offset**(3),
+**pt_blk_get_image**(3), **pt_blk_set_image**(3), **pt_blk_get_config**(3),
+**pt_blk_time**(3), **pt_blk_core_bus_ratio**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_blk_get_offset.3.md b/doc/man/pt_blk_get_offset.3.md
new file mode 100644
index 000000000000..548772bd410d
--- /dev/null
+++ b/doc/man/pt_blk_get_offset.3.md
@@ -0,0 +1,82 @@
+% PT_BLK_GET_OFFSET(3)
+
+<!---
+ ! Copyright (c) 2016-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_blk_get_offset, pt_blk_get_sync_offset - get an Intel(R) Processor Trace
+block decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_blk_get_offset(const struct pt_block_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_blk_get_sync_offset(const struct pt_block_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_blk_get_offset**() provides *decoder*'s current position as offset in
+bytes from the beginning of *decoder*'s trace buffer in the unsigned integer
+variable pointed to by *offset*.
+
+**pt_blk_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+: *decoder* has not been synchronized onto the trace stream. Use
+ **pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3), or
+ **pt_blk_sync_set**(3) to synchronize *decoder*.
+
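+
+# EXAMPLE
+
+The following sketch (the helper function name is illustrative) computes how
+far *decoder* has progressed past its last synchronization point:
+
+~~~{.c}
+int delta_offset(const struct pt_block_decoder *decoder, uint64_t *delta) {
+	uint64_t offset, sync;
+	int errcode;
+
+	errcode = pt_blk_get_offset(decoder, &offset);
+	if (errcode < 0)
+		return errcode;
+
+	errcode = pt_blk_get_sync_offset(decoder, &sync);
+	if (errcode < 0)
+		return errcode;
+
+	*delta = offset - sync;
+	return 0;
+}
+~~~
+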
+
+# SEE ALSO
+
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3),
+**pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3),
+**pt_blk_sync_set**(3), **pt_blk_get_config**(3), **pt_blk_time**(3),
+**pt_blk_core_bus_ratio**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_blk_next.3.md b/doc/man/pt_blk_next.3.md
new file mode 100644
index 000000000000..a4939332f182
--- /dev/null
+++ b/doc/man/pt_blk_next.3.md
@@ -0,0 +1,285 @@
+% PT_BLK_NEXT(3)
+
+<!---
+ ! Copyright (c) 2016-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_blk_next, pt_block - iterate over blocks of traced instructions
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_block;**
+|
+| **int pt_blk_next(struct pt_block_decoder \**decoder*,**
+| **struct pt_block \**block*, size_t *size*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_blk_next**() provides the next block of instructions in execution order,
+which is described by the *pt_block* structure.
+
+The *size* argument must be set to *sizeof(struct pt_block)*. The function will
+provide at most *size* bytes of the *pt_block* structure. A newer decoder
+library may truncate an extended *pt_block* object to *size* bytes.
+
+An older decoder library may provide fewer *pt_block* fields. Fields that are
+not provided will be zero-initialized. For fields where zero is a valid value
+(e.g. for bit-fields), check the decoder library version to determine which
+fields are valid. See **pt_library_version**(3).
+
+On success, the next block of instructions is provided in the *pt_block* object
+pointed to by the *block* argument. The *pt_block* structure is declared as:
+
+~~~{.c}
+/** A block of instructions.
+ *
+ * Instructions in this block are executed sequentially but are not necessarily
+ * contiguous in memory. Users are expected to follow direct branches.
+ */
+struct pt_block {
+ /** The IP of the first instruction in this block. */
+ uint64_t ip;
+
+ /** The IP of the last instruction in this block.
+ *
+ * This can be used for error-detection.
+ */
+ uint64_t end_ip;
+
+ /** The image section that contains the instructions in this block.
+ *
+ * A value of zero means that the section did not have an identifier.
+ * The section was not added via an image section cache or the memory
+ * was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode for all instructions in this block. */
+ enum pt_exec_mode mode;
+
+ /** The instruction class for the last instruction in this block.
+ *
+ * This field may be set to ptic_error to indicate that the instruction
+ * class is not available. The block decoder may choose to not provide
+ * the instruction class in some cases for performance reasons.
+ */
+ enum pt_insn_class iclass;
+
+ /** The number of instructions in this block. */
+ uint16_t ninsn;
+
+ /** The raw bytes of the last instruction in this block in case the
+ * instruction does not fit entirely into this block's section.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size of the last instruction in this block in bytes.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t size;
+
+ /** A collection of flags giving additional information about the
+ * instructions in this block.
+ *
+ * - all instructions in this block were executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - the last instruction in this block is truncated.
+ *
+ * It starts in this block's section but continues in one or more
+ * other sections depending on how fragmented the memory image is.
+ *
+ * The raw bytes for the last instruction are provided in \@raw and
+ * its size in \@size in this case.
+ */
+ uint32_t truncated:1;
+};
+~~~
+
+The fields of the *pt_block* structure are described in more detail below:
+
+ip
+: The virtual address of the first instruction in the block. The address
+ should be interpreted in the current address space context.
+
+end_ip
+: The virtual address of the last instruction in the block. The address
+ should be interpreted in the current address space context.
+
+ This can be used for error detection. Reconstruction of the instructions in
+ a block should end with the last instruction at *end_ip*.
+
+isid
+: The image section identifier of the section from which the block of
+ instructions originated. This will be zero unless the instructions came
+ from a section that was added via an image section cache. See
+ **pt_image_add_cached**(3).
+
+ The image section identifier can be used for reading the memory containing
+ an instruction in order to decode it and for tracing an instruction back to
+ its binary file and from there to source code.
+
+mode
+: The execution mode at which the instructions in the block were executed.
+ The *pt_exec_mode* enumeration is declared as:
+
+~~~{.c}
+/** An execution mode. */
+enum pt_exec_mode {
+ ptem_unknown,
+ ptem_16bit,
+ ptem_32bit,
+ ptem_64bit
+};
+~~~
+
+iclass
+: A coarse classification of the last instruction in the block. This may be
+ *ptic_error* to indicate that the classification is not available.
+
+ The block decoder knows the instruction class of the instruction that ended
+ the block most of the time. If it does, it provides this information to
+ save the caller the effort of decoding the instruction in some cases.
+
+ninsn
+: The number of instructions contained in this block.
+
+ The instructions are sequential in the sense that no trace is required for
+ reconstructing them. They are not necessarily contiguous in memory.
+
+ The IP of the first instruction is given in the *ip* field and the IP of
+ other instructions can be determined by decoding and examining the previous
+ instruction.
+
+raw
+: If the last instruction of this block can not be read entirely from this
+ block's section, this field provides the instruction's raw bytes.
+
+ It is only valid if the *truncated* flag is set.
+
+size
+: If the last instruction of this block can not be read entirely from this
+ block's section, this field provides the instruction's size in bytes.
+
+ It is only valid if the *truncated* flag is set.
+
+speculative
+: A flag giving the speculative execution status of all instructions in the
+ block. If set, the instructions were executed speculatively. Otherwise,
+ the instructions were executed normally.
+
+truncated
+: A flag saying whether the last instruction in this block can not be read
+ entirely from this block's section. Some bytes need to be read from one or
+ more other sections. This can happen when an image section is partially
+ overwritten by another image section.
+
+ If set, the last instruction's memory is provided in *raw* and its size in
+ *size*.
+
+
+# RETURN VALUE
+
+**pt_blk_next**() returns zero or a positive value on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+The *pts_event_pending* flag indicates that one or more events are pending. Use
+**pt_blk_event**(3) to process pending events before calling **pt_blk_next**()
+again.
+
+The *pts_eos* flag indicates that the information contained in the Intel PT
+stream has been consumed. Further calls to **pt_blk_next**() will continue to
+provide blocks for instructions as long as the instructions' addresses can be
+determined without further trace.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *block* argument is NULL or the *size* argument is too
+ small.
+
+pte_eos
+:   The decoder reached the end of the trace stream.
+
+pte_nosync
+: The decoder has not been synchronized onto the trace stream. Use
+ **pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3), or
+ **pt_blk_sync_set**(3) to synchronize *decoder*.
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+pte_bad_query
+: Execution flow reconstruction and trace got out of sync.
+
+ This typically means that, on its way to the virtual address of the next
+ event, the decoder encountered a conditional or indirect branch for which it
+ did not find guidance in the trace.
+
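+
+# EXAMPLE
+
+The following sketch shows a typical decode loop. The handle_block() function
+is a placeholder for user code and error handling is reduced to a minimum:
+
+~~~{.c}
+int process_blocks(struct pt_block_decoder *decoder) {
+	for (;;) {
+		struct pt_block block;
+		int status;
+
+		status = pt_blk_next(decoder, &block, sizeof(block));
+		if (status < 0)
+			/* -pte_eos indicates the end of the trace. */
+			return status;
+
+		handle_block(&block);
+
+		/* Process pending events before decoding further. */
+		while (status & pts_event_pending) {
+			struct pt_event event;
+
+			status = pt_blk_event(decoder, &event, sizeof(event));
+			if (status < 0)
+				return status;
+		}
+	}
+}
+~~~
+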
+
+# SEE ALSO
+
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3),
+**pt_blk_sync_forward**(3), **pt_blk_sync_backward**(3),
+**pt_blk_sync_set**(3), **pt_blk_time**(3), **pt_blk_core_bus_ratio**(3),
+**pt_blk_event**(3)
diff --git a/doc/man/pt_blk_sync_forward.3.md b/doc/man/pt_blk_sync_forward.3.md
new file mode 100644
index 000000000000..c6b01be37051
--- /dev/null
+++ b/doc/man/pt_blk_sync_forward.3.md
@@ -0,0 +1,152 @@
+% PT_BLK_SYNC_FORWARD(3)
+
+<!---
+ ! Copyright (c) 2016-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_blk_sync_forward, pt_blk_sync_backward, pt_blk_sync_set - synchronize an
+Intel(R) Processor Trace block decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_blk_sync_forward(struct pt_block_decoder \**decoder*);**
+| **int pt_blk_sync_backward(struct pt_block_decoder \**decoder*);**
+| **int pt_blk_sync_set(struct pt_block_decoder \**decoder*,**
+| **uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+These functions synchronize an Intel Processor Trace (Intel PT) block decoder
+pointed to by *decoder* onto the trace stream in *decoder*'s trace buffer.
+
+They search for a Packet Stream Boundary (PSB) packet in the trace stream and,
+if successful, set *decoder*'s current position and synchronization position to
+that packet and start processing packets. For synchronization to be
+successful, there must be a full PSB+ header in the trace stream.
+
+**pt_blk_sync_forward**() searches in forward direction from *decoder*'s
+current position towards the end of the trace buffer. If *decoder* has been
+newly allocated and has not been synchronized yet, the search starts from the
+beginning of the trace.
+
+**pt_blk_sync_backward**() searches in backward direction from *decoder*'s
+current position towards the beginning of the trace buffer. If *decoder* has
+been newly allocated and has not been synchronized yet, the search starts from
+the end of the trace.
+
+**pt_blk_sync_set**() searches at *offset* bytes from the beginning of its
+trace buffer.
+
+
+# RETURN VALUE
+
+All synchronization functions return zero or a positive value on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+The *pts_event_pending* flag indicates that one or more events are pending. Use
+**pt_blk_event**(3) to process pending events before calling **pt_blk_next**(3).
+
+The *pts_eos* flag indicates that the information contained in the Intel PT
+stream has been consumed. Calls to **pt_blk_next**() will provide blocks for
+instructions as long as the instructions' addresses can be determined without
+further trace.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* argument is NULL.
+
+pte_eos
+: There is no (further) PSB+ header in the trace stream
+ (**pt_blk_sync_forward**() and **pt_blk_sync_backward**()) or at *offset*
+ bytes into the trace buffer (**pt_blk_sync_set**()).
+
+pte_nosync
+: There is no PSB packet at *offset* bytes from the beginning of the trace
+ (**pt_blk_sync_set**() only).
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+
+# EXAMPLE
+
+The following example re-synchronizes an Intel PT block decoder after decode
+errors:
+
+~~~{.c}
+int foo(struct pt_block_decoder *decoder) {
+ for (;;) {
+ int errcode;
+
+ errcode = pt_blk_sync_forward(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ do {
+ errcode = decode(decoder);
+ } while (errcode >= 0);
+ }
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3),
+**pt_blk_get_offset**(3), **pt_blk_get_sync_offset**(3),
+**pt_blk_get_config**(3), **pt_blk_time**(3), **pt_blk_core_bus_ratio**(3),
+**pt_blk_next**(3), **pt_blk_event**(3)
diff --git a/doc/man/pt_config.3.md b/doc/man/pt_config.3.md
new file mode 100644
index 000000000000..2e8466b896fd
--- /dev/null
+++ b/doc/man/pt_config.3.md
@@ -0,0 +1,359 @@
+% PT_CONFIG(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_config, pt_config_init, pt_cpu_errata - Intel(R) Processor Trace
+encoder/decoder configuration
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_config;**
+|
+| **void pt_config_init(struct pt_config \**config*);**
+|
+| **int pt_cpu_errata(struct pt_errata \**errata*, const struct pt_cpu \**cpu*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+The *pt_config* structure defines an Intel Processor Trace (Intel PT) encoder or
+decoder configuration. It is required for allocating a trace packet encoder
+(see **pt_alloc_encoder**(3)), a trace packet decoder (see
+**pt_pkt_alloc_decoder**(3)), a query decoder (see **pt_qry_alloc_decoder**(3)),
+or an instruction flow decoder (see **pt_insn_alloc_decoder**(3)).
+
+**pt_config_init**() zero-initializes its *config* argument and sets *config*'s
+*size* field to *sizeof(struct pt_config)*.
+
+**pt_cpu_errata**() enables workarounds for known errata in its *errata*
+argument for the processor defined by its family/model/stepping in its *cpu*
+argument.
+
+
+The *pt_config* structure is declared as:
+
+~~~{.c}
+/** An Intel PT decoder configuration. */
+struct pt_config {
+ /** The size of the config structure in bytes. */
+ size_t size;
+
+ /** The trace buffer begin address. */
+ uint8_t *begin;
+
+ /** The trace buffer end address. */
+ uint8_t *end;
+
+ /** An optional callback for handling unknown packets.
+ *
+ * If \@callback is not NULL, it is called for any unknown
+ * opcode.
+ */
+ struct {
+ /** The callback function.
+ *
+ * It shall decode the packet at \@pos into \@unknown.
+ * It shall return the number of bytes read upon success.
+ * It shall return a negative pt_error_code otherwise.
+ * The below context is passed as \@context.
+ */
+ int (*callback)(struct pt_packet_unknown *unknown,
+ const struct pt_config *config,
+ const uint8_t *pos, void *context);
+
+ /** The user-defined context for this configuration. */
+ void *context;
+ } decode;
+
+ /** The cpu on which Intel PT has been recorded. */
+ struct pt_cpu cpu;
+
+ /** The errata to apply when encoding or decoding Intel PT. */
+ struct pt_errata errata;
+
+ /** The CTC frequency.
+ *
+ * This is only required if MTC packets have been enabled in
+ * IA32_RTIT_CTRL.MTCEn.
+ */
+ uint32_t cpuid_0x15_eax, cpuid_0x15_ebx;
+
+ /** The MTC frequency as defined in IA32_RTIT_CTL.MTCFreq.
+ *
+ * This is only required if MTC packets have been enabled in
+ * IA32_RTIT_CTRL.MTCEn.
+ */
+ uint8_t mtc_freq;
+
+ /** The nominal frequency as defined in
+ * MSR_PLATFORM_INFO[15:8].
+ *
+ * This is only required if CYC packets have been enabled in
+ * IA32_RTIT_CTRL.CYCEn.
+ *
+ * If zero, timing calibration will only be able to use MTC
+ * and CYC packets.
+ *
+ * If not zero, timing calibration will also be able to use
+ * CBR packets.
+ */
+ uint8_t nom_freq;
+
+ /** A collection of decoder-specific flags. */
+ struct pt_conf_flags flags;
+
+ /** The address filter configuration. */
+ struct pt_conf_addr_filter addr_filter;
+};
+~~~
+
+The fields of the *pt_config* structure are described in more detail below:
+
+size
+: The size of the *pt_config* structure for backward and forward
+ compatibility. Set it to *sizeof(struct pt_config)*.
+
+begin, end
+: The begin and end of a user-allocated memory buffer; *begin* points to
+ the first byte of the buffer, *end* points to one past the last byte in the
+ buffer.
+
+ The packet encoder will generate Intel PT packets into the memory buffer.
+
+ The decoders expect the buffer to contain raw Intel PT packets. They decode
+ directly from the buffer and expect the buffer to remain valid until the
+ decoder has been freed.
+
+decode
+: An optional packet decode callback function. If *decode.callback* is not
+ NULL, it will be called for any unknown packet with the decoder
+ configuration, the current decoder position and with a user-defined context
+ provided in *callback.context* as arguments.
+
+ If the callback function is able to decode the packet, it shall return the
+ size of the decoded packet and provide details in a *pt_packet_unknown*
+ object.
+
+ If the packet cannot be decoded, the callback function shall return a
+ negative *pt_error_code* enumeration constant.
+
+ The *pt_packet_unknown* object can be used to provide user-defined
+ information back to the user when using the packet decoder to iterate over
+ Intel PT packets. Other decoders ignore this information but will skip
+ the packet if a non-zero size is returned by the callback function.
+
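+    For illustration only, the following sketch skips a hypothetical one-byte
+    vendor packet with opcode 0xc5 (the opcode and the callback name are made
+    up; the fields of *pt_packet_unknown* are left to the header):
+
+~~~{.c}
+static int skip_unknown_packet(struct pt_packet_unknown *unknown,
+	const struct pt_config *config, const uint8_t *pos, void *context) {
+	(void) unknown;
+	(void) context;
+
+	if (!config || !pos || config->end <= pos)
+		return -pte_invalid;
+
+	/* Only our hypothetical vendor packet is handled. */
+	if (pos[0] != 0xc5)
+		return -pte_bad_opc;
+
+	/* Tell the decoder to skip one byte. */
+	return 1;
+}
+~~~
+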
+cpu
+: The processor on which the trace has been collected or for which the trace
+ should be generated. The processor is identified by its family, model, and
+ stepping.
+
+~~~{.c}
+/** A cpu vendor. */
+enum pt_cpu_vendor {
+ pcv_unknown,
+ pcv_intel
+};
+
+/** A cpu identifier. */
+struct pt_cpu {
+ /** The cpu vendor. */
+ enum pt_cpu_vendor vendor;
+
+ /** The cpu family. */
+ uint16_t family;
+
+ /** The cpu model. */
+ uint8_t model;
+
+ /** The stepping. */
+ uint8_t stepping;
+};
+~~~
+
+errata
+: The errata workarounds to be applied by the trace encoder or decoder that
+ is created using this configuration.
+
+ The *pt_errata* structure is a collection of one-bit-fields, one for each
+ supported erratum. Duplicate errata are indicated by comments for the
+ erratum for which the workaround was first implemented. Set the field of an
+    erratum to enable the corresponding workaround.
+
+ The *pt_errata* structure is declared as:
+
+~~~{.c}
+/** A collection of Intel PT errata. */
+struct pt_errata {
+ /** BDM70: Intel(R) Processor Trace PSB+ Packets May Contain
+ * Unexpected Packets.
+ *
+ * Same as: SKD024.
+ *
+ * Some Intel Processor Trace packets should be issued only
+ * between TIP.PGE and TIP.PGD packets. Due to this erratum,
+ * when a TIP.PGE packet is generated it may be preceded by a
+ * PSB+ that incorrectly includes FUP and MODE.Exec packets.
+ */
+ uint32_t bdm70:1;
+
+ /** BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet
+ * May Be Recorded Following a Transactional Abort.
+ *
+ * Use of Intel(R) Transactional Synchronization Extensions
+ * (Intel(R) TSX) may result in a transactional abort. If an
+ * abort occurs immediately following a branch instruction,
+ * an incorrect branch target may be logged in an LBR (Last
+ * Branch Record) or in an Intel(R) Processor Trace (Intel(R)
+ * PT) packet before the LBR or Intel PT packet produced by
+ * the abort.
+ */
+ uint32_t bdm64:1;
+
+ [...]
+};
+~~~
+
+cpuid_0x15_eax, cpuid_0x15_ebx
+: The values of *eax* and *ebx* on a *cpuid* call for leaf *0x15*.
+
+ The value *ebx/eax* gives the ratio of the Core Crystal Clock (CTC) to
+ Timestamp Counter (TSC) frequency.
+
+ This field is ignored by the packet encoder and packet decoder. It is
+ required for other decoders if Mini Time Counter (MTC) packets are enabled
+ in the collected trace.
+
+mtc_freq
+: The Mini Time Counter (MTC) frequency as defined in *IA32_RTIT_CTL.MTCFreq*.
+
+ This field is ignored by the packet encoder and packet decoder. It is
+ required for other decoders if Mini Time Counter (MTC) packets are enabled
+ in the collected trace.
+
+nom_freq
+: The nominal or max non-turbo frequency.
+
+ This field is ignored by the packet encoder and packet decoder. It is
+ used by other decoders if Cycle Count (CYC) packets are enabled to improve
+ timing calibration for cycle-accurate tracing.
+
+ If the field is zero, the time tracking algorithm will use Mini Time
+ Counter (MTC) and Cycle Count (CYC) packets for calibration.
+
+ If the field is non-zero, the time tracking algorithm will additionally be
+ able to calibrate at Core:Bus Ratio (CBR) packets.
+
+flags
+: A collection of decoder-specific configuration flags.
+
+addr_filter
+: The address filter configuration. It is declared as:
+
+~~~{.c}
+/** The address filter configuration. */
+struct pt_conf_addr_filter {
+ /** The address filter configuration.
+ *
+ * This corresponds to the respective fields in IA32_RTIT_CTL MSR.
+ */
+ union {
+ uint64_t addr_cfg;
+
+ struct {
+ uint32_t addr0_cfg:4;
+ uint32_t addr1_cfg:4;
+ uint32_t addr2_cfg:4;
+ uint32_t addr3_cfg:4;
+ } ctl;
+ } config;
+
+ /** The address ranges configuration.
+ *
+ * This corresponds to the IA32_RTIT_ADDRn_A/B MSRs.
+ */
+ uint64_t addr0_a;
+ uint64_t addr0_b;
+ uint64_t addr1_a;
+ uint64_t addr1_b;
+ uint64_t addr2_a;
+ uint64_t addr2_b;
+ uint64_t addr3_a;
+ uint64_t addr3_b;
+
+ /* Reserve some space. */
+ uint64_t reserved[8];
+};
+~~~
+
+# RETURN VALUE
+
+**pt_cpu_errata**() returns zero on success or a negative *pt_error_code*
+enumeration constant otherwise.
+
+
+# ERRORS
+
+**pt_cpu_errata**() may return the following errors:
+
+pte_invalid
+: The *errata* or *cpu* argument is NULL.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(uint8_t *trace_buffer, size_t size, struct pt_cpu cpu) {
+ struct pt_config config;
+ int errcode;
+
+ pt_config_init(&config);
+ config.begin = trace_buffer;
+ config.end = trace_buffer + size;
+ config.cpu = cpu;
+
+ errcode = pt_cpu_errata(&config.errata, &config.cpu);
+ if (errcode < 0)
+ return errcode;
+
+ [...]
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_alloc_encoder**(3), **pt_pkt_alloc_decoder**(3),
+**pt_qry_alloc_decoder**(3), **pt_insn_alloc_decoder**(3)
diff --git a/doc/man/pt_enc_get_config.3.md b/doc/man/pt_enc_get_config.3.md
new file mode 100644
index 000000000000..91896c28fb27
--- /dev/null
+++ b/doc/man/pt_enc_get_config.3.md
@@ -0,0 +1,77 @@
+% PT_ENC_GET_CONFIG(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_enc_get_config, pt_pkt_get_config, pt_qry_get_config, pt_insn_get_config,
+pt_blk_get_config - get an Intel(R) Processor Trace encoder/decoder's
+configuration
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **const struct pt_config \***
+| **pt_enc_get_config(const struct pt_encoder \**encoder*);**
+|
+| **const struct pt_config \***
+| **pt_pkt_get_config(const struct pt_packet_decoder \**decoder*);**
+|
+| **const struct pt_config \***
+| **pt_qry_get_config(const struct pt_query_decoder \**decoder*);**
+|
+| **const struct pt_config \***
+| **pt_insn_get_config(const struct pt_insn_decoder \**decoder*);**
+|
+| **const struct pt_config \***
+| **pt_blk_get_config(const struct pt_block_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+These functions return a pointer to their argument's configuration. The
+returned configuration object must not be freed. It is valid as long as their
+argument is not freed.
+
+
+# RETURN VALUE
+
+These functions return a pointer to a *pt_config* object. The returned pointer
+is NULL if their argument is NULL.
+
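+
+# EXAMPLE
+
+The following sketch (the helper function name is illustrative) reads the size
+of a block decoder's trace buffer from its configuration:
+
+~~~{.c}
+uint64_t buffer_size(const struct pt_block_decoder *decoder) {
+	const struct pt_config *config;
+
+	config = pt_blk_get_config(decoder);
+	if (!config)
+		return 0ull;
+
+	return (uint64_t) (config->end - config->begin);
+}
+~~~
+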
+
+# SEE ALSO
+
+**pt_config**(3), **pt_alloc_encoder**(3), **pt_pkt_alloc_decoder**(3),
+**pt_qry_alloc_decoder**(3), **pt_insn_alloc_decoder**(3),
+**pt_blk_alloc_decoder**(3)
diff --git a/doc/man/pt_enc_get_offset.3.md b/doc/man/pt_enc_get_offset.3.md
new file mode 100644
index 000000000000..cfb78e260310
--- /dev/null
+++ b/doc/man/pt_enc_get_offset.3.md
@@ -0,0 +1,77 @@
+% PT_ENC_GET_OFFSET(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_enc_get_offset, pt_enc_sync_set - get/set an Intel(R) Processor Trace packet
+encoder's current trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_enc_get_offset(const struct pt_packet_encoder \**encoder*,**
+| **uint64_t \**offset*);**
+| **int pt_enc_sync_set(struct pt_packet_encoder \**encoder*,**
+| **uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_enc_get_offset**() provides *encoder*'s current position as offset in bytes
+from the beginning of *encoder*'s trace buffer in the unsigned integer variable
+pointed to by *offset*.
+
+**pt_enc_sync_set**() sets *encoder*'s current position to *offset* bytes from
+the beginning of its trace buffer.
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *encoder* or *offset* (for **pt_enc_sync_set**()) argument is NULL.
+
+pte_eos
+: The *offset* argument is too big and the resulting position would be outside
+ of *encoder*'s trace buffer (**pt_enc_sync_set**() only).
+
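+
+# EXAMPLE
+
+The following sketch (function name and error handling are illustrative)
+encodes a packet and reports how many bytes it occupied in the trace buffer:
+
+~~~{.c}
+int encode_one(struct pt_packet_encoder *encoder,
+	const struct pt_packet *packet, uint64_t *bytes) {
+	uint64_t begin, end;
+	int errcode;
+
+	errcode = pt_enc_get_offset(encoder, &begin);
+	if (errcode < 0)
+		return errcode;
+
+	errcode = pt_enc_next(encoder, packet);
+	if (errcode < 0)
+		return errcode;
+
+	errcode = pt_enc_get_offset(encoder, &end);
+	if (errcode < 0)
+		return errcode;
+
+	*bytes = end - begin;
+	return 0;
+}
+~~~
+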
+
+# SEE ALSO
+
+**pt_alloc_encoder**(3), **pt_free_encoder**(3), **pt_enc_next**(3)
diff --git a/doc/man/pt_image_add_file.3.md b/doc/man/pt_image_add_file.3.md
new file mode 100644
index 000000000000..4c5ec9863f45
--- /dev/null
+++ b/doc/man/pt_image_add_file.3.md
@@ -0,0 +1,135 @@
+% PT_IMAGE_ADD_FILE(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_image_add_file, pt_image_add_cached, pt_image_copy - add file sections to a
+traced memory image descriptor
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_image_add_file(struct pt_image \**image*, const char \**filename*,**
+| **uint64_t *offset*, uint64_t *size*,**
+| **const struct pt_asid \**asid*, uint64_t *vaddr*);**
+| **int pt_image_add_cached(struct pt_image \**image*,**
+| **struct pt_image_section_cache \**iscache*,**
+| **int *isid*, const struct pt_asid \**asid*);**
+| **int pt_image_copy(struct pt_image \**image*,**
+| **const struct pt_image \**src*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_image_add_file**() adds a new section to a *pt_image* object. The *image*
+argument points to the *pt_image* object to which the new section is added. The
+*filename* argument gives the absolute or relative path to the file on disk that
+contains the section. The *offset* and *size* arguments define the section
+within the file. The *size* argument is silently truncated to end the section
+with the end of the underlying file. The *vaddr* argument gives the virtual
+address at which the section is being loaded.
+
+**pt_image_add_cached**() adds a new section from an image section cache. See
+**pt_iscache_add_file**(3). The *iscache* argument points to the
+*pt_image_section_cache* object containing the section. The *isid* argument
+gives the image section identifier for the desired section in that cache.
+
+The *asid* argument gives an optional address space identifier. If it is not
+NULL, it points to a *pt_asid* structure, which is declared as:
+
+~~~{.c}
+/** An Intel PT address space identifier.
+ *
+ * This identifies a particular address space when adding file
+ * sections or when reading memory.
+ */
+struct pt_asid {
+ /** The size of this object - set to sizeof(struct pt_asid).
+ */
+ size_t size;
+
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The VMCS Base address. */
+ uint64_t vmcs;
+};
+~~~
+
+The *asid* argument can be used to provide a collection of process, guest, and
+hypervisor images to an Intel(R) Processor Trace (Intel PT) instruction flow
+decoder. The decoder will select the current image based on CR3 and VMCS
+information in the Intel PT trace.
+
+If only the CR3 or only the VMCS field shall be considered by the decoder, set
+the respective other field to *pt_asid_no_vmcs* or *pt_asid_no_cr3*.
+
+If the *asid* argument is NULL, the file section will be added for all
+processes, guests, and hypervisor images.
+
+If the new section overlaps with an existing section, the existing section is
+truncated or split to make room for the new section.
+
+**pt_image_copy**() adds file sections from the *pt_image* pointed to by the
+*src* argument to the *pt_image* pointed to by the *image* argument.
+
+
+# RETURN VALUE
+
+**pt_image_add_file**() and **pt_image_add_cached**() return zero on success or
+a negative *pt_error_code* enumeration constant in case of an error.
+
+**pt_image_copy**() returns the number of ignored sections on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *image* or *filename* argument is NULL or the *offset* argument is too
+ big such that the section would start past the end of the file
+ (**pt_image_add_file**()).
+ The *image* or *iscache* argument is NULL (**pt_image_add_cached**()).
+  The *image* or *src* argument is NULL (**pt_image_copy**()).
+
+pte_bad_image
+: The *iscache* does not contain *isid* (**pt_image_add_cached**()).
+
+
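+# EXAMPLE
+
+The example below is a sketch of adding a section that shall only be visible in
+the address space identified by a given CR3 value; the *add_text_section*()
+helper and its arguments are hypothetical:
+
+~~~{.c}
+int add_text_section(struct pt_image *image, const char *filename,
+		     uint64_t offset, uint64_t size, uint64_t vaddr,
+		     uint64_t cr3) {
+	struct pt_asid asid;
+
+	/* Match on CR3 only; the VMCS field is left unspecified. */
+	pt_asid_init(&asid);
+	asid.cr3 = cr3;
+
+	return pt_image_add_file(image, filename, offset, size,
+				 &asid, vaddr);
+}
+~~~
+
+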
+# SEE ALSO
+
+**pt_image_alloc**(3), **pt_image_free**(3),
+**pt_image_remove_by_filename**(3), **pt_image_remove_by_asid**(3),
+**pt_image_set_callback**(3), **pt_insn_set_image**(3),
+**pt_insn_get_image**(3), **pt_iscache_alloc**(3), **pt_iscache_add_file**(3)
diff --git a/doc/man/pt_image_alloc.3.md b/doc/man/pt_image_alloc.3.md
new file mode 100644
index 000000000000..898ddfa3823f
--- /dev/null
+++ b/doc/man/pt_image_alloc.3.md
@@ -0,0 +1,99 @@
+% PT_IMAGE_ALLOC(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_image_alloc, pt_image_free, pt_image_name - allocate/free a traced memory
+image descriptor
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_image \*pt_image_alloc(const char \**name*);**
+| **const char \*pt_image_name(const struct pt_image \**image*);**
+| **void pt_image_free(struct pt_image \**image*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_image_alloc**() allocates a new *pt_image* and returns a pointer to it. A
+*pt_image* object defines the memory image that was traced as a collection of
+file sections and the virtual addresses at which those sections were loaded.
+
+The *name* argument points to an optional zero-terminated name string. If the
+*name* argument is NULL, it will be ignored and the returned *pt_image* object
+will not have a name. Otherwise, the returned *pt_image* object will have a
+copy of the string pointed to by the *name* argument as name.
+
+**pt_image_name**() returns the name of the *pt_image* object the *image*
+argument points to.
+
+**pt_image_free**() frees the *pt_image* object pointed to by *image*. The
+*image* argument must be NULL or point to an image that has been allocated by a
+call to **pt_image_alloc**().
+
+
+# RETURN VALUE
+
+**pt_image_alloc**() returns a pointer to a *pt_image* object on success or NULL
+in case of an error.
+
+**pt_image_name**() returns a pointer to a zero-terminated string or NULL if the
+image does not have a name.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const char *name) {
+ struct pt_image *image;
+	int errcode;
+
+ image = pt_image_alloc(name);
+ if (!image)
+ return pte_nomem;
+
+ errcode = bar(image);
+
+ pt_image_free(image);
+ return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_image_add_file**(3), **pt_image_add_cached**(3), **pt_image_copy**(3),
+**pt_image_remove_by_filename**(3), **pt_image_remove_by_asid**(3),
+**pt_image_set_callback**(3), **pt_insn_set_image**(3), **pt_insn_get_image**(3)
diff --git a/doc/man/pt_image_remove_by_filename.3.md b/doc/man/pt_image_remove_by_filename.3.md
new file mode 100644
index 000000000000..6ab3ebf871e2
--- /dev/null
+++ b/doc/man/pt_image_remove_by_filename.3.md
@@ -0,0 +1,150 @@
+% PT_IMAGE_REMOVE_BY_FILENAME(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_image_remove_by_filename, pt_image_remove_by_asid - remove sections from a
+traced memory image descriptor
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_image_remove_by_filename(struct pt_image \**image*,**
+| **const char \**filename*,**
+| **const struct pt_asid \**asid*);**
+| **int pt_image_remove_by_asid(struct pt_image \**image*,**
+| **const struct pt_asid \**asid*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_image_remove_by_filename**() removes all sections from *image* that were
+added by a call to **pt_image_add_file**(3) with an identical *filename*
+argument or by a call to **pt_image_copy**(3) from such a section. Sections
+that are based on the same underlying file but that were added using a different
+*filename* argument are not removed.
+
+If the *asid* argument is not NULL, it removes only sections that were added
+with a matching address-space identifier. See **pt_image_add_file**(3).
+
+**pt_image_remove_by_asid**() removes all sections from *image* that were added
+by a call to **pt_image_add_file**(3) with a matching *asid* argument or by a
+call to **pt_image_copy**(3) from such a section. See **pt_image_add_file**(3).
+
+Two *pt_asid* objects match in their *cr3* or *vmcs* field if one of them does
+not provide the field (i.e. sets it to *pt_asid_no_cr3* or *pt_asid_no_vmcs*
+respectively) or if the provided values are identical. Two *pt_asid* objects
+match if they match in all fields.
+
+
+# RETURN VALUE
+
+Both functions return the number of sections removed on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *image* argument is NULL or the *filename* argument is NULL
+ (**pt_image_remove_by_filename**() only).
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(struct pt_image *image, uint64_t cr3) {
+ struct pt_asid asid1, asid2;
+ int errcode;
+
+ pt_asid_init(&asid1);
+ asid1.cr3 = cr3;
+
+ pt_asid_init(&asid2);
+ asid2.cr3 = ~cr3;
+
+ errcode = pt_image_add_file(image, "/path/to/libfoo.so",
+ 0xa000, 0x100, &asid1, 0xb000);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_image_add_file(image, "rel/path/to/libfoo.so",
+ 0xa000, 0x100, &asid1, 0xc000);
+ if (errcode < 0)
+ return errcode;
+
+ /* This call would only remove the section added first:
+ *
+ * - filename matches only the first section's filename
+ * - NULL matches every asid
+ */
+ (void) pt_image_remove_by_filename(image,
+ "/path/to/libfoo.so",
+ NULL);
+
+ /* This call would not remove any of the above sections:
+ *
+ * - filename matches the first section's filename
+ * - asid2 does not match asid1
+ */
+ (void) pt_image_remove_by_filename(image,
+ "/path/to/libfoo.so",
+ &asid2);
+
+ /* This call would not remove any of the above sections:
+ *
+ * - asid2 does not match asid1
+ */
+ (void) pt_image_remove_by_asid(image, &asid2);
+
+ /* This call would remove both sections:
+ *
+ * - asid1 matches itself
+ */
+ (void) pt_image_remove_by_asid(image, &asid1);
+
+ /* This call would remove both sections:
+ *
+ * - NULL matches every asid
+ */
+ (void) pt_image_remove_by_asid(image, NULL);
+
+	return 0;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_image_alloc**(3), **pt_image_free**(3), **pt_image_add_file**(3),
+**pt_image_add_cached**(3), **pt_image_copy**(3), **pt_insn_set_image**(3),
+**pt_insn_get_image**(3)
diff --git a/doc/man/pt_image_set_callback.3.md b/doc/man/pt_image_set_callback.3.md
new file mode 100644
index 000000000000..53448e9e14cd
--- /dev/null
+++ b/doc/man/pt_image_set_callback.3.md
@@ -0,0 +1,103 @@
+% PT_IMAGE_SET_CALLBACK(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_image_set_callback - set a traced memory image read memory callback
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **typedef int (read_memory_callback_t)(uint8_t \**buffer*, size_t *size*,**
+| **const struct pt_asid \**asid*,**
+| **uint64_t *ip*, void \**context*);**
+|
+| **int pt_image_set_callback(struct pt_image \**image*,**
+| **read_memory_callback_t \**callback*,**
+| **void \**context*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_image_set_callback**() sets the function pointed to by *callback* as the
+read-memory callback function in the *pt_image* object pointed to by *image*.
+Any previous read-memory callback function is replaced. The read-memory
+callback function can be removed by passing NULL as *callback* argument.
+
+When the Intel(R) Processor Trace (Intel PT) instruction flow decoder that is
+using *image* tries to read memory from a location that is not contained in any
+of the file sections in *image*, it calls the read-memory callback function with
+the following arguments:
+
+buffer
+: A pre-allocated memory buffer to hold the to-be-read memory. The callback
+ function shall provide the read memory in that buffer.
+
+size
+: The size of the memory buffer pointed to by the *buffer* argument.
+
+asid
+: The address-space identifier specifying the process, guest, or hypervisor,
+ in which context the *ip* argument is to be interpreted. See
+ **pt_image_add_file**(3).
+
+ip
+: The virtual address from which *size* bytes of memory shall be read.
+
+context
+: The *context* argument passed to **pt_image_set_callback**().
+
+The callback function shall return the number of bytes read on success (no more
+than *size*) or a negative *pt_error_code* enumeration constant in case of an
+error.
+
+
+# RETURN VALUE
+
+**pt_image_set_callback**() returns zero on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: If the *image* argument is NULL.
+
+
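+# EXAMPLE
+
+The following sketch shows a read-memory callback that serves reads from a
+single flat memory snapshot passed via the *context* argument. The *snapshot*
+structure and the *read_snapshot*() function are hypothetical, and the sketch
+assumes that *memcpy*() is available:
+
+~~~{.c}
+struct snapshot {
+	uint8_t *base;
+	uint64_t vaddr;
+	uint64_t size;
+};
+
+static int read_snapshot(uint8_t *buffer, size_t size,
+			 const struct pt_asid *asid, uint64_t ip,
+			 void *context) {
+	struct snapshot *snap = (struct snapshot *) context;
+	uint64_t begin = snap->vaddr, end = snap->vaddr + snap->size;
+
+	(void) asid;
+
+	if (ip < begin || end <= ip)
+		return -pte_nomap;
+
+	/* Truncate the read at the end of the snapshot. */
+	if ((uint64_t) size > end - ip)
+		size = (size_t) (end - ip);
+
+	memcpy(buffer, snap->base + (ip - begin), size);
+
+	return (int) size;
+}
+
+int install_callback(struct pt_image *image, struct snapshot *snap) {
+	return pt_image_set_callback(image, read_snapshot, snap);
+}
+~~~
+
+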
+# SEE ALSO
+
+**pt_image_alloc**(3), **pt_image_free**(3), **pt_image_add_file**(3),
+**pt_image_add_cached**(3), **pt_image_copy**(3),
+**pt_image_remove_by_filename**(3), **pt_image_remove_by_asid**(3),
+**pt_insn_set_image**(3), **pt_insn_get_image**(3)
diff --git a/doc/man/pt_insn_alloc_decoder.3.md b/doc/man/pt_insn_alloc_decoder.3.md
new file mode 100644
index 000000000000..dfaade4ada19
--- /dev/null
+++ b/doc/man/pt_insn_alloc_decoder.3.md
@@ -0,0 +1,101 @@
+% PT_INSN_ALLOC_DECODER(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_insn_alloc_decoder, pt_insn_free_decoder - allocate/free an Intel(R)
+Processor Trace instruction flow decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_insn_decoder \***
+| **pt_insn_alloc_decoder(const struct pt_config \**config*);**
+|
+| **void pt_insn_free_decoder(struct pt_insn_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+An instruction flow decoder decodes raw Intel Processor Trace (Intel PT) into a
+sequence of instructions described by the *pt_insn* structure. See
+**pt_insn_next**(3).
+
+**pt_insn_alloc_decoder**() allocates a new instruction flow decoder and returns
+a pointer to it. The *config* argument points to a *pt_config* object. See
+**pt_config**(3). The *config* argument will not be referenced by the returned
+decoder but the trace buffer defined by the *config* argument's *begin* and
+*end* fields will.
+
+The returned instruction flow decoder needs to be synchronized onto the trace
+stream before it can be used. To synchronize the instruction flow decoder, use
+**pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), or
+**pt_insn_sync_set**(3).
+
+**pt_insn_free_decoder**() frees the Intel PT instruction flow decoder pointed
+to by *decoder*. The *decoder* argument must be NULL or point to a decoder that
+has been allocated by a call to **pt_insn_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_insn_alloc_decoder**() returns a pointer to a *pt_insn_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+ struct pt_insn_decoder *decoder;
+	int errcode;
+
+ decoder = pt_insn_alloc_decoder(config);
+ if (!decoder)
+ return pte_nomem;
+
+ errcode = bar(decoder);
+
+ pt_insn_free_decoder(decoder);
+ return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3),
+**pt_insn_sync_set**(3), **pt_insn_get_offset**(3), **pt_insn_get_sync_offset**(3),
+**pt_insn_get_image**(3), **pt_insn_set_image**(3), **pt_insn_get_config**(3),
+**pt_insn_time**(3), **pt_insn_core_bus_ratio**(3), **pt_insn_next**(3)
diff --git a/doc/man/pt_insn_get_image.3.md b/doc/man/pt_insn_get_image.3.md
new file mode 100644
index 000000000000..8bc2ebf6b237
--- /dev/null
+++ b/doc/man/pt_insn_get_image.3.md
@@ -0,0 +1,93 @@
+% PT_INSN_GET_IMAGE(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_insn_get_image, pt_insn_set_image, pt_blk_get_image, pt_blk_set_image -
+get/set an Intel(R) Processor Trace instruction flow or block decoder's traced
+memory image descriptor
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_image \*pt_insn_get_image(struct pt_insn_decoder \**decoder*);**
+| **struct pt_image \*pt_blk_get_image(struct pt_block_decoder \**decoder*);**
+|
+| **int pt_insn_set_image(struct pt_insn_decoder \**decoder*,**
+| **struct pt_image \**image*);**
+| **int pt_blk_set_image(struct pt_block_decoder \**decoder*,**
+| **struct pt_image \**image*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_insn_get_image**() and **pt_blk_get_image**() return the traced memory
+image descriptor that *decoder* uses for reading instruction memory. See
+**pt_image_alloc**(3). Every decoder comes with a default *pt_image* object
+that is initially empty and that will automatically be destroyed when the
+decoder is freed.
+
+**pt_insn_set_image**() and **pt_blk_set_image**() set the traced memory image
+descriptor that *decoder* uses for reading instruction memory. If the *image*
+argument is NULL, *decoder*'s image is reset to its default image. The user is
+responsible for freeing the *pt_image* object that *image* points to when it is
+no longer needed.
+
+
+# RETURN VALUE
+
+**pt_insn_get_image**() and **pt_blk_get_image**() return a pointer to
+*decoder*'s *pt_image* object. The returned pointer is NULL if the *decoder*
+argument is NULL.
+
+**pt_insn_set_image**() and **pt_blk_set_image**() return zero on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* argument is NULL.
+
+
+# NOTES
+
+One *pt_image* object must not be shared between multiple decoders. Use
+**pt_image_copy**(3) to copy a common image.
+
+
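+# EXAMPLE
+
+The following sketch temporarily installs a caller-provided image and restores
+the decoder's default image afterwards; the *decode*() helper is hypothetical:
+
+~~~{.c}
+int decode_with_image(struct pt_insn_decoder *decoder,
+		      struct pt_image *image) {
+	int errcode, status;
+
+	errcode = pt_insn_set_image(decoder, image);
+	if (errcode < 0)
+		return errcode;
+
+	status = decode(decoder);
+
+	/* Passing NULL restores the decoder's default image. */
+	errcode = pt_insn_set_image(decoder, NULL);
+	if (errcode < 0)
+		return errcode;
+
+	return status;
+}
+~~~
+
+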
+# SEE ALSO
+
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3), **pt_insn_next**(3),
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_insn_get_offset.3.md b/doc/man/pt_insn_get_offset.3.md
new file mode 100644
index 000000000000..7b680ed5dbc7
--- /dev/null
+++ b/doc/man/pt_insn_get_offset.3.md
@@ -0,0 +1,82 @@
+% PT_INSN_GET_OFFSET(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_insn_get_offset, pt_insn_get_sync_offset - get an Intel(R) Processor Trace
+instruction flow decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_insn_get_offset(const struct pt_insn_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_insn_get_sync_offset(const struct pt_insn_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_insn_get_offset**() provides *decoder*'s current position as offset in
+bytes from the beginning of *decoder*'s trace buffer in the unsigned integer
+variable pointed to by *offset*.
+
+**pt_insn_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+: *decoder* has not been synchronized onto the trace stream. Use
+ **pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), or
+ **pt_insn_sync_set**(3) to synchronize *decoder*.
+
+
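+# EXAMPLE
+
+The following sketch uses the current offset to report where in the trace
+buffer a decode error occurred; the *diagnose*() helper is hypothetical and
+assumes that *stdio.h* and *inttypes.h* are included:
+
+~~~{.c}
+void diagnose(struct pt_insn_decoder *decoder, int errcode) {
+	uint64_t offset;
+
+	if (pt_insn_get_offset(decoder, &offset) < 0)
+		offset = 0ull;
+
+	printf("error at offset 0x%" PRIx64 ": %s\n", offset,
+	       pt_errstr(pt_errcode(errcode)));
+}
+~~~
+
+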
+# SEE ALSO
+
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3),
+**pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3),
+**pt_insn_sync_set**(3), **pt_insn_get_config**(3), **pt_insn_time**(3),
+**pt_insn_core_bus_ratio**(3), **pt_insn_next**(3)
diff --git a/doc/man/pt_insn_next.3.md b/doc/man/pt_insn_next.3.md
new file mode 100644
index 000000000000..ed4332ae4b5b
--- /dev/null
+++ b/doc/man/pt_insn_next.3.md
@@ -0,0 +1,264 @@
+% PT_INSN_NEXT(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_insn_next, pt_insn - iterate over traced instructions
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_insn;**
+|
+| **int pt_insn_next(struct pt_insn_decoder \**decoder*,**
+| **struct pt_insn \**insn*, size_t *size*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_insn_next**() provides the next instruction in execution order, which is
+described by the *pt_insn* structure.
+
+The *size* argument must be set to *sizeof(struct pt_insn)*. The function will
+provide at most *size* bytes of the *pt_insn* structure. A newer decoder
+library may truncate an extended *pt_insn* object to *size* bytes.
+
+An older decoder library may provide fewer *pt_insn* fields. Fields that are not
+provided will be zero-initialized. For fields where zero is a valid value
+(e.g. for bit-fields), check the decoder library version to determine which
+fields are valid. See **pt_library_version**(3).
+
+On success, the next instruction is provided in the *pt_insn* object pointed to
+by the *insn* argument. The *pt_insn* structure is declared as:
+
+~~~{.c}
+/** A single traced instruction. */
+struct pt_insn {
+ /** The virtual address in its process. */
+ uint64_t ip;
+
+ /** The image section identifier for the section containing this
+ * instruction.
+ *
+ * A value of zero means that the section did not have an identifier.
+ * The section was not added via an image section cache or the memory
+ * was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** A coarse classification. */
+ enum pt_insn_class iclass;
+
+ /** The raw bytes. */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size in bytes. */
+ uint8_t size;
+
+ /** A collection of flags giving additional information:
+ *
+ * - the instruction was executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - this instruction is truncated in its image section.
+ *
+ * It starts in the image section identified by \@isid and continues
+ * in one or more other sections.
+ */
+ uint32_t truncated:1;
+};
+~~~
+
+The fields of the *pt_insn* structure are described in more detail below:
+
+ip
+: The virtual address of the instruction. The address should be interpreted
+ in the current address space context.
+
+isid
+: The image section identifier of the section from which the instruction
+ originated. This will be zero unless the instruction came from a section
+ that was added via an image section cache. See **pt_image_add_cached**(3).
+
+ The image section identifier can be used to trace an instruction back to
+ its binary file and from there to source code.
+
+mode
+: The execution mode at which the instruction was executed. The
+ *pt_exec_mode* enumeration is declared as:
+
+~~~{.c}
+/** An execution mode. */
+enum pt_exec_mode {
+ ptem_unknown,
+ ptem_16bit,
+ ptem_32bit,
+ ptem_64bit
+};
+~~~
+
+iclass
+: A coarse classification of the instruction suitable for constructing a call
+ back trace. The *pt_insn_class* enumeration is declared as:
+
+~~~{.c}
+/** The instruction class.
+ *
+ * We provide only a very coarse classification suitable for
+ * reconstructing the execution flow.
+ */
+enum pt_insn_class {
+ /* The instruction could not be classified. */
+ ptic_error,
+
+ /* The instruction is something not listed below. */
+ ptic_other,
+
+ /* The instruction is a near (function) call. */
+ ptic_call,
+
+ /* The instruction is a near (function) return. */
+ ptic_return,
+
+ /* The instruction is a near unconditional jump. */
+ ptic_jump,
+
+ /* The instruction is a near conditional jump. */
+ ptic_cond_jump,
+
+ /* The instruction is a call-like far transfer.
+ * E.g. SYSCALL, SYSENTER, or FAR CALL.
+ */
+ ptic_far_call,
+
+ /* The instruction is a return-like far transfer.
+ * E.g. SYSRET, SYSEXIT, IRET, or FAR RET.
+ */
+ ptic_far_return,
+
+ /* The instruction is a jump-like far transfer.
+ * E.g. FAR JMP.
+ */
+ ptic_far_jump
+};
+~~~
+
+raw
+: The memory containing the instruction.
+
+size
+: The size of the instruction in bytes.
+
+speculative
+: A flag giving the speculative execution status of the instruction. If set,
+ the instruction was executed speculatively. Otherwise, the instruction was
+ executed normally.
+
+truncated
+: A flag saying whether this instruction spans more than one image section.
+ If clear, this instruction originates from a single section identified by
+ *isid*. If set, the instruction overlaps two or more image sections. In
+ this case, *isid* identifies the section that contains the first byte.
+
+
+# RETURN VALUE
+
+**pt_insn_next**() returns zero or a positive value on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+The *pts_event_pending* flag indicates that one or more events are pending. Use
+**pt_insn_event**(3) to process pending events before calling **pt_insn_next**()
+again.
+
+The *pts_eos* flag indicates that the information contained in the Intel PT
+stream has been consumed. Further calls to **pt_insn_next**() will continue to
+provide instructions as long as the instruction's address can be determined
+without further trace.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *insn* argument is NULL or the *size* argument is too
+ small.
+
+pte_eos
+: The decoder reached the end of the trace stream.
+
+pte_nosync
+: The decoder has not been synchronized onto the trace stream. Use
+ **pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3), or
+ **pt_insn_sync_set**(3) to synchronize *decoder*.
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+pte_bad_query
+: Execution flow reconstruction and trace got out of sync.
+
+ This typically means that, on its way to the virtual address of the next
+ event, the decoder encountered a conditional or indirect branch for which it
+ did not find guidance in the trace.
+
+
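+# EXAMPLE
+
+The following sketch outlines a decode loop that drains pending events before
+fetching the next instruction; the *process_insn*() helper is hypothetical:
+
+~~~{.c}
+int decode(struct pt_insn_decoder *decoder, int status) {
+	for (;;) {
+		struct pt_insn insn;
+
+		/* Process all pending events first. */
+		while (status & pts_event_pending) {
+			struct pt_event event;
+
+			status = pt_insn_event(decoder, &event,
+					       sizeof(event));
+			if (status < 0)
+				return status;
+		}
+
+		status = pt_insn_next(decoder, &insn, sizeof(insn));
+		if (status < 0)
+			return status;
+
+		process_insn(&insn);
+	}
+}
+~~~
+
+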
+# SEE ALSO
+
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3),
+**pt_insn_sync_forward**(3), **pt_insn_sync_backward**(3),
+**pt_insn_sync_set**(3), **pt_insn_time**(3), **pt_insn_core_bus_ratio**(3),
+**pt_insn_event**(3)
diff --git a/doc/man/pt_insn_sync_forward.3.md b/doc/man/pt_insn_sync_forward.3.md
new file mode 100644
index 000000000000..0306b9813ea4
--- /dev/null
+++ b/doc/man/pt_insn_sync_forward.3.md
@@ -0,0 +1,153 @@
+% PT_INSN_SYNC_FORWARD(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_insn_sync_forward, pt_insn_sync_backward, pt_insn_sync_set - synchronize an
+Intel(R) Processor Trace instruction flow decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_insn_sync_forward(struct pt_insn_decoder \**decoder*);**
+| **int pt_insn_sync_backward(struct pt_insn_decoder \**decoder*);**
+| **int pt_insn_sync_set(struct pt_insn_decoder \**decoder*,**
+| **uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+These functions synchronize an Intel Processor Trace (Intel PT) instruction flow
+decoder pointed to by *decoder* onto the trace stream in *decoder*'s trace
+buffer.
+
+They search for a Packet Stream Boundary (PSB) packet in the trace stream and,
+if successful, set *decoder*'s current position and synchronization position to
+that packet and start processing packets. For synchronization to be
+successful, there must be a full PSB+ header in the trace stream.
+
+**pt_insn_sync_forward**() searches in forward direction from *decoder*'s
+current position towards the end of the trace buffer. If *decoder* has been
+newly allocated and has not been synchronized yet, the search starts from the
+beginning of the trace.
+
+**pt_insn_sync_backward**() searches in backward direction from *decoder*'s
+current position towards the beginning of the trace buffer. If *decoder* has
+been newly allocated and has not been synchronized yet, the search starts from
+the end of the trace.
+
+**pt_insn_sync_set**() searches at *offset* bytes from the beginning of its
+trace buffer.
+
+
+# RETURN VALUE
+
+All synchronization functions return zero or a positive value on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+The *pts_event_pending* flag indicates that one or more events are pending. Use
+**pt_insn_event**(3) to process pending events before calling
+**pt_insn_next**(3).
+
+The *pts_eos* flag indicates that the information contained in the Intel PT
+stream has been consumed. Calls to **pt_insn_next**() will provide instructions
+as long as the instruction's address can be determined without trace.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* argument is NULL.
+
+pte_eos
+: There is no (further) PSB+ header in the trace stream
+ (**pt_insn_sync_forward**() and **pt_insn_sync_backward**()) or at *offset*
+ bytes into the trace buffer (**pt_insn_sync_set**()).
+
+pte_nosync
+: There is no PSB packet at *offset* bytes from the beginning of the trace
+ (**pt_insn_sync_set**() only).
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+
+# EXAMPLE
+
+The following example re-synchronizes an Intel PT instruction flow decoder after
+decode errors:
+
+~~~{.c}
+int foo(struct pt_insn_decoder *decoder) {
+ for (;;) {
+ int status;
+
+ status = pt_insn_sync_forward(decoder);
+ if (status < 0)
+ return status;
+
+ do {
+ status = decode(decoder, status);
+ } while (status >= 0);
+ }
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3),
+**pt_insn_get_offset**(3), **pt_insn_get_sync_offset**(3),
+**pt_insn_get_config**(3), **pt_insn_time**(3), **pt_insn_core_bus_ratio**(3),
+**pt_insn_next**(3), **pt_insn_event**(3)
diff --git a/doc/man/pt_iscache_add_file.3.md b/doc/man/pt_iscache_add_file.3.md
new file mode 100644
index 000000000000..c679bf415401
--- /dev/null
+++ b/doc/man/pt_iscache_add_file.3.md
@@ -0,0 +1,98 @@
+% PT_ISCACHE_ADD_FILE(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_iscache_add_file - add file sections to a traced memory image section cache
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_iscache_add_file(struct pt_image_section_cache \**iscache*,**
+| **const char \**filename*, uint64_t *offset*,**
+| **uint64_t *size*, uint64_t *vaddr*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_iscache_add_file**() adds a new section consisting of *size* bytes starting
+at *offset* in *filename* loaded at *vaddr* to *iscache*.
+
+On success, **pt_iscache_add_file**() returns a positive integer identifier that
+uniquely identifies the added section in that cache. This identifier can be
+used to add sections from an image section cache to one or more traced memory
+images. See **pt_image_add_cached**(3). Sections added from an image section
+cache will be shared across images. The identifier can also be used to read
+memory from the cached section. See **pt_iscache_read**(3).
+
+If the cache already contains a suitable section, no section is added and the
+identifier for the existing section is returned. If the cache already contains
+a section that only differs in the load address, a new section is added that
+shares the underlying file section.
+
+
+# RETURN VALUE
+
+**pt_iscache_add_file**() returns a positive image section identifier on success
+or a negative *pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *iscache* or *filename* argument is NULL or the *offset* argument is too
+ big such that the section would start past the end of the file.
+
+
+# EXAMPLE
+
+~~~{.c}
+int add_file(struct pt_image_section_cache *iscache, struct pt_image *image,
+ const char *filename, uint64_t offset, uint64_t size,
+ uint64_t vaddr, const struct pt_asid *asid) {
+ int isid;
+
+ isid = pt_iscache_add_file(iscache, filename, offset, size, vaddr);
+ if (isid < 0)
+ return isid;
+
+ return pt_image_add_cached(image, iscache, isid, asid);
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_iscache_alloc**(3), **pt_iscache_free**(3), **pt_iscache_read**(3),
+**pt_image_add_cached**(3)
diff --git a/doc/man/pt_iscache_alloc.3.md b/doc/man/pt_iscache_alloc.3.md
new file mode 100644
index 000000000000..d9ee2b5768a0
--- /dev/null
+++ b/doc/man/pt_iscache_alloc.3.md
@@ -0,0 +1,102 @@
+% PT_ISCACHE_ALLOC(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_iscache_alloc, pt_iscache_free, pt_iscache_name - allocate/free a traced memory
+image section cache
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_image_section_cache \*pt_iscache_alloc(const char \**name*);**
+| **const char \*pt_iscache_name(const struct pt_image_section_cache \**iscache*);**
+| **void pt_iscache_free(struct pt_image_section_cache \**iscache*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_iscache_alloc**() allocates a new *pt_image_section_cache* and returns a
+pointer to it. A *pt_image_section_cache* object contains a collection of file
+sections and the virtual addresses at which those sections were loaded.
+
+The image sections can be added to one or more *pt_image* objects. The
+underlying file sections will be mapped once and their content will be shared
+across images.
+
+The *name* argument points to an optional zero-terminated name string. If the
+*name* argument is NULL, it will be ignored and the returned
+*pt_image_section_cache* object will not have a name. Otherwise, the returned
+*pt_image_section_cache* object will have a copy of the string pointed to by
+the *name* argument as name.
+
+**pt_iscache_name**() returns the name of the *pt_image_section_cache* object
+the *iscache* argument points to.
+
+**pt_iscache_free**() frees the *pt_image_section_cache* object pointed to by
+*iscache*. The *iscache* argument must be NULL or point to an image section
+cache that has been allocated by a call to **pt_iscache_alloc**().
+
+
+# RETURN VALUE
+
+**pt_iscache_alloc**() returns a pointer to a *pt_image_section_cache* object
+on success or NULL in case of an error.
+
+**pt_iscache_name**() returns a pointer to a zero-terminated string or NULL if the
+image section cache does not have a name.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const char *name) {
+ struct pt_image_section_cache *iscache;
+	int errcode;
+
+	iscache = pt_iscache_alloc(name);
+ if (!iscache)
+ return pte_nomem;
+
+ errcode = bar(iscache);
+
+ pt_iscache_free(iscache);
+ return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_iscache_add_file**(3), **pt_image_add_cached**(3)
diff --git a/doc/man/pt_iscache_read.3.md b/doc/man/pt_iscache_read.3.md
new file mode 100644
index 000000000000..67ece2c0e5a0
--- /dev/null
+++ b/doc/man/pt_iscache_read.3.md
@@ -0,0 +1,89 @@
+% PT_ISCACHE_READ(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_iscache_read - read memory from a cached file section
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_iscache_read(struct pt_image_section_cache \**iscache*,**
+| **uint8_t \**buffer*, uint64_t *size*, int *isid*,**
+| **uint64_t *vaddr*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_iscache_read**() reads memory from a cached file section. The file section
+must have previously been added by a call to **pt_iscache_add_file**(3). The
+*iscache* argument points to the *pt_image_section_cache* object. It must be
+the same that was used in the corresponding **pt_iscache_add_file**(3) call. The
+*buffer* argument must point to a memory buffer of at least *size* bytes. The
+*isid* argument identifies the file section from which memory is read. It must
+be the same identifier that was returned from the corresponding
+**pt_iscache_add_file**(3) call that added the file section to the cache. The
+*vaddr* argument gives the virtual address from which *size* bytes of memory
+shall be read.
+
+On success, **pt_iscache_read**() copies at most *size* bytes of memory from the
+cached file section identified by *isid* in *iscache* starting at virtual
+address *vaddr* into *buffer* and returns the number of bytes that were copied.
+
+Multiple calls to **pt_iscache_read**() may be necessary if *size* is bigger
+than 4Kbyte or if the read crosses a section boundary.
+
+
+# RETURN VALUE
+
+**pt_iscache_read**() returns the number of bytes that were read on success
+or a negative *pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *iscache* or *buffer* argument is NULL or the *size* argument is zero.
+
+pte_bad_image
+: The *iscache* does not contain a section identified by *isid*.
+
+pte_nomap
+: The *vaddr* argument lies outside of the virtual address range of the cached
+ section.
+
+
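+# EXAMPLE
+
+The following sketch reads a larger range by calling **pt_iscache_read**()
+repeatedly until the requested number of bytes has been copied; the
+*read_range*() helper is hypothetical:
+
+~~~{.c}
+int read_range(struct pt_image_section_cache *iscache, uint8_t *buffer,
+	       uint64_t size, int isid, uint64_t vaddr) {
+	while (size) {
+		int bytes;
+
+		bytes = pt_iscache_read(iscache, buffer, size, isid, vaddr);
+		if (bytes < 0)
+			return bytes;
+
+		/* Continue behind the bytes that were already read. */
+		buffer += bytes;
+		vaddr += (uint64_t) bytes;
+		size -= (uint64_t) bytes;
+	}
+
+	return 0;
+}
+~~~
+
+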
+# SEE ALSO
+
+**pt_iscache_alloc**(3), **pt_iscache_free**(3), **pt_iscache_add_file**(3)
diff --git a/doc/man/pt_iscache_set_limit.3.md b/doc/man/pt_iscache_set_limit.3.md
new file mode 100644
index 000000000000..1ab93b4dbab9
--- /dev/null
+++ b/doc/man/pt_iscache_set_limit.3.md
@@ -0,0 +1,73 @@
+% PT_ISCACHE_SET_LIMIT(3)
+
+<!---
+ ! Copyright (c) 2017-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_iscache_set_limit - set the mapped image section cache limit
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_iscache_set_limit(struct pt_image_section_cache \**iscache*,**
+| **uint64_t *limit*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_iscache_set_limit**() sets the mapped image section cache limit. The
+*iscache* argument points to the *pt_image_section_cache* object. The *limit*
+argument gives the limit in bytes.
+
+The image section cache will spend at most *limit* bytes to keep image sections
+mapped as opposed to mapping and unmapping them when reading from them. This
+includes the memory for any caches associated with the mapped section.
+
+A *limit* of zero disables caching and clears the cache.
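+
+The following example sketches how a limit might be set and later removed
+again; the 4 MiB value is an arbitrary illustration, not a recommendation:
+
+~~~{.c}
+int foo(struct pt_image_section_cache *iscache) {
+	int errcode;
+
+	/* Keep at most 4 MiB of image sections mapped. */
+	errcode = pt_iscache_set_limit(iscache, 4ull << 20);
+	if (errcode < 0)
+		return errcode;
+
+	[...]
+
+	/* Disable caching and clear the cache again. */
+	return pt_iscache_set_limit(iscache, 0ull);
+}
+~~~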
+
+
+# RETURN VALUE
+
+**pt_iscache_set_limit**() returns zero on success or a negative *pt_error_code*
+enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *iscache* argument is NULL.
+
+
+# SEE ALSO
+
+**pt_iscache_alloc**(3), **pt_iscache_free**(3), **pt_iscache_read**(3)
diff --git a/doc/man/pt_library_version.3.md b/doc/man/pt_library_version.3.md
new file mode 100644
index 000000000000..3867d79cfef0
--- /dev/null
+++ b/doc/man/pt_library_version.3.md
@@ -0,0 +1,72 @@
+% PT_LIBRARY_VERSION(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_library_version - version information
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_version pt_library_version();**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_library_version**() returns the decoder library version.
+
+
+# RETURN VALUE
+
+**pt_library_version**() returns a *pt_version* structure which is declared as:
+
+~~~{.c}
+/** The library version. */
+struct pt_version {
+ /** Major version number. */
+ uint8_t major;
+
+ /** Minor version number. */
+ uint8_t minor;
+
+ /** Reserved bits. */
+ uint16_t reserved;
+
+ /** Build number. */
+ uint32_t build;
+
+ /** Version extension. */
+ const char *ext;
+};
+~~~
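+
+
+# EXAMPLE
+
+The following example sketches how the library version might be printed; the
+output format is illustrative:
+
+~~~{.c}
+#include <stdio.h>
+
+void foo(void) {
+	struct pt_version version;
+
+	version = pt_library_version();
+
+	printf("libipt %u.%u.%u%s\n", (unsigned int) version.major,
+	       (unsigned int) version.minor, (unsigned int) version.build,
+	       version.ext ? version.ext : "");
+}
+~~~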
diff --git a/doc/man/pt_packet.3.md b/doc/man/pt_packet.3.md
new file mode 100644
index 000000000000..59396000d649
--- /dev/null
+++ b/doc/man/pt_packet.3.md
@@ -0,0 +1,197 @@
+% PT_PACKET(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_packet, pt_enc_next, pt_pkt_next - encode/decode an Intel(R) Processor Trace
+packet
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_packet;**
+|
+| **int pt_enc_next(struct pt_packet_encoder \**encoder*,**
+| **const struct pt_packet \**packet*);**
+|
+| **int pt_pkt_next(struct pt_packet_decoder \**decoder*,**
+| **struct pt_packet \**packet*, size_t *size*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_enc_next**() encodes its *packet* argument as an Intel Processor Trace
+(Intel PT) packet at *encoder*'s current position. On success, sets *encoder*'s
+current position to point to the first byte after the encoded packet.
+
+
+**pt_pkt_next**() decodes the Intel PT packet at *decoder*'s current position
+into *packet*. On success, sets *decoder*'s current position to point to the
+first byte after the decoded packet.
+
+The caller is responsible for allocating and freeing the *pt_packet* object
+pointed to by the *packet* argument.
+
+The *size* argument of **pt_pkt_next**() must be set to *sizeof(struct
+pt_packet)*. The function will provide at most *size* bytes of packet data. A
+newer decoder library may provide packet types that are not yet defined. Those
+packets may be truncated. Unknown packet types should be ignored.
+
+If the packet decoder does not know the packet opcode at *decoder*'s current
+position and if *decoder*'s configuration contains a packet decode callback
+function, **pt_pkt_next**() will call that callback function to decode the
+unknown packet. On success, a packet of type *ppt_unknown* is provided,
+carrying the information returned by the decode callback function.
+
+An Intel PT packet is described by the *pt_packet* structure, which is declared
+as:
+
+~~~{.c}
+/** An Intel PT packet. */
+struct pt_packet {
+ /** The type of the packet.
+ *
+ * This also determines the \@variant field.
+ */
+ enum pt_packet_type type;
+
+ /** The size of the packet including opcode and payload. */
+ uint8_t size;
+
+ /** Packet specific data. */
+ union {
+ /** Packets: pad, ovf, psb, psbend, stop - no payload. */
+
+ /** Packet: tnt-8, tnt-64. */
+ struct pt_packet_tnt tnt;
+
+ /** Packet: tip, fup, tip.pge, tip.pgd. */
+ struct pt_packet_ip ip;
+
+ /** Packet: mode. */
+ struct pt_packet_mode mode;
+
+ /** Packet: pip. */
+ struct pt_packet_pip pip;
+
+ /** Packet: tsc. */
+ struct pt_packet_tsc tsc;
+
+ /** Packet: cbr. */
+ struct pt_packet_cbr cbr;
+
+ /** Packet: tma. */
+ struct pt_packet_tma tma;
+
+ /** Packet: mtc. */
+ struct pt_packet_mtc mtc;
+
+ /** Packet: cyc. */
+ struct pt_packet_cyc cyc;
+
+ /** Packet: vmcs. */
+ struct pt_packet_vmcs vmcs;
+
+ /** Packet: mnt. */
+ struct pt_packet_mnt mnt;
+
+ /** Packet: unknown. */
+ struct pt_packet_unknown unknown;
+ } payload;
+};
+~~~
+
+See the *intel-pt.h* header file for more detail.
+
+
+# RETURN VALUE
+
+**pt_enc_next**() returns the number of bytes written on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+**pt_pkt_next**() returns the number of bytes consumed on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *encoder*/*decoder* or *packet* argument is NULL or the *size* argument
+ is zero (**pt_pkt_next**() only).
+
+pte_nosync
+: *decoder* has not been synchronized onto the trace stream (**pt_pkt_next**()
+ only). Use **pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), or
+ **pt_pkt_sync_set**(3) to synchronize *decoder*.
+
+pte_eos
+: Encode/decode has reached the end of the trace buffer. There is not enough
+ space in the trace buffer to generate *packet* (**pt_enc_next**()) or the
+ trace buffer does not contain a full Intel PT packet (**pt_pkt_next**()).
+
+pte_bad_opc
+: The type of the *packet* argument is not supported (**pt_enc_next**()) or
+ the packet at *decoder*'s current position is not supported
+ (**pt_pkt_next**()).
+
+pte_bad_packet
+: The payload or parts of the payload of the *packet* argument is not
+ supported (**pt_enc_next**()) or the packet at *decoder*'s current position
+ contains unsupported payload (**pt_pkt_next**()).
+
+
+# EXAMPLE
+
+The example shows a typical Intel PT packet decode loop.
+
+~~~{.c}
+int foo(struct pt_packet_decoder *decoder) {
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0)
+ return errcode;
+
+ [...]
+ }
+}
+~~~
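+
+The following example sketches the encode direction for a single PAD packet.
+It assumes *bar*() is called with a newly allocated encoder (see
+**pt_alloc_encoder**(3)) or one that has otherwise been positioned:
+
+~~~{.c}
+int bar(struct pt_packet_encoder *encoder) {
+	struct pt_packet packet;
+	int errcode;
+
+	/* A PAD packet has no payload. */
+	packet.type = ppt_pad;
+	packet.size = 0;
+
+	errcode = pt_enc_next(encoder, &packet);
+	if (errcode < 0)
+		return errcode;
+
+	return 0;
+}
+~~~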
+
+
+# SEE ALSO
+
+**pt_alloc_encoder**(3), **pt_pkt_alloc_decoder**(3),
+**pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), **pt_pkt_sync_set**(3)
diff --git a/doc/man/pt_pkt_alloc_decoder.3.md b/doc/man/pt_pkt_alloc_decoder.3.md
new file mode 100644
index 000000000000..daf3db55972c
--- /dev/null
+++ b/doc/man/pt_pkt_alloc_decoder.3.md
@@ -0,0 +1,98 @@
+% PT_PKT_ALLOC_DECODER(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_pkt_alloc_decoder, pt_pkt_free_decoder - allocate/free an Intel(R) Processor
+Trace packet decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_packet_decoder \***
+| **pt_pkt_alloc_decoder(const struct pt_config \**config*);**
+|
+| **void pt_pkt_free_decoder(struct pt_packet_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_pkt_alloc_decoder**() allocates a new Intel Processor Trace (Intel PT)
+packet decoder and returns a pointer to it. The packet decoder decodes raw
+Intel PT trace into a stream of *pt_packet* objects. See **pt_pkt_next**(3).
+
+The *config* argument points to a *pt_config* object. See **pt_config**(3).
+The *config* argument will not be referenced by the returned decoder but the
+trace buffer defined by the *config* argument's *begin* and *end* fields will.
+
+The returned packet decoder needs to be synchronized onto the trace stream
+before it can be used. To synchronize the packet decoder, use
+**pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), or
+**pt_pkt_sync_set**(3).
+
+**pt_pkt_free_decoder**() frees the Intel PT packet decoder pointed to by
+*decoder*. The *decoder* argument must be NULL or point to a decoder that has
+been allocated by a call to **pt_pkt_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_pkt_alloc_decoder**() returns a pointer to a *pt_packet_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+ struct pt_packet_decoder *decoder;
+ int errcode;
+
+ decoder = pt_pkt_alloc_decoder(config);
+ if (!decoder)
+ return pte_nomem;
+
+ errcode = bar(decoder);
+
+ pt_pkt_free_decoder(decoder);
+ return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3),
+**pt_pkt_sync_set**(3), **pt_pkt_get_offset**(3), **pt_pkt_get_sync_offset**(3),
+**pt_pkt_get_config**(3), **pt_pkt_next**(3)
diff --git a/doc/man/pt_pkt_get_offset.3.md b/doc/man/pt_pkt_get_offset.3.md
new file mode 100644
index 000000000000..21427f84efd5
--- /dev/null
+++ b/doc/man/pt_pkt_get_offset.3.md
@@ -0,0 +1,81 @@
+% PT_PKT_GET_OFFSET(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_pkt_get_offset, pt_pkt_get_sync_offset - get an Intel(R) Processor Trace
+packet decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_pkt_get_offset(const struct pt_packet_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_pkt_get_sync_offset(const struct pt_packet_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_pkt_get_offset**() provides *decoder*'s current position as offset in bytes
+from the beginning of *decoder*'s trace buffer in the unsigned integer variable
+pointed to by *offset*.
+
+**pt_pkt_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
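+
+The following example sketches how the current offset might be reported, for
+instance when diagnosing a decode error; the output format is illustrative:
+
+~~~{.c}
+#include <stdio.h>
+
+int foo(const struct pt_packet_decoder *decoder) {
+	uint64_t offset;
+	int errcode;
+
+	errcode = pt_pkt_get_offset(decoder, &offset);
+	if (errcode < 0)
+		return errcode;
+
+	printf("offset: 0x%llx\n", (unsigned long long) offset);
+	return 0;
+}
+~~~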
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+: *decoder* has not been synchronized onto the trace stream. Use
+ **pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3), or
+ **pt_pkt_sync_set**(3) to synchronize *decoder*.
+
+
+# SEE ALSO
+
+**pt_pkt_alloc_decoder**(3), **pt_pkt_free_decoder**(3),
+**pt_pkt_sync_forward**(3), **pt_pkt_sync_backward**(3),
+**pt_pkt_sync_set**(3), **pt_pkt_next**(3)
diff --git a/doc/man/pt_pkt_sync_forward.3.md b/doc/man/pt_pkt_sync_forward.3.md
new file mode 100644
index 000000000000..6ff975f78ea6
--- /dev/null
+++ b/doc/man/pt_pkt_sync_forward.3.md
@@ -0,0 +1,115 @@
+% PT_PKT_SYNC_FORWARD(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_pkt_sync_forward, pt_pkt_sync_backward, pt_pkt_sync_set - synchronize an
+Intel(R) Processor Trace packet decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_pkt_sync_forward(struct pt_packet_decoder \**decoder*);**
+| **int pt_pkt_sync_backward(struct pt_packet_decoder \**decoder*);**
+| **int pt_pkt_sync_set(struct pt_packet_decoder \**decoder*,**
+| **uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_pkt_sync_forward**() and **pt_pkt_sync_backward**() synchronize an Intel
+Processor Trace (Intel PT) packet decoder pointed to by *decoder* onto the trace
+stream in *decoder*'s trace buffer. They search for a Packet Stream Boundary
+(PSB) packet in the trace stream and, if successful, set *decoder*'s current
+position to that packet.
+
+**pt_pkt_sync_forward**() searches in forward direction from *decoder*'s current
+position towards the end of the trace buffer. If *decoder* has been newly
+allocated and has not been synchronized yet, the search starts from the
+beginning of the trace.
+
+**pt_pkt_sync_backward**() searches in backward direction from *decoder*'s
+current position towards the beginning of the trace buffer. If *decoder* has
+been newly allocated and has not been synchronized yet, the search starts from
+the end of the trace.
+
+**pt_pkt_sync_set**() sets *decoder*'s current position to *offset* bytes from
+the beginning of its trace buffer.
+
+
+# RETURN VALUE
+
+All synchronization functions return zero or a positive value on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* argument is NULL.
+
+pte_eos
+: There is no (further) PSB packet in the trace stream
+ (**pt_pkt_sync_forward**() and **pt_pkt_sync_backward**()) or the *offset*
+ argument is too big and the resulting position would be outside of
+ *decoder*'s trace buffer (**pt_pkt_sync_set**()).
+
+
+# EXAMPLE
+
+The following example re-synchronizes an Intel PT packet decoder after decode
+errors:
+
+~~~{.c}
+int foo(struct pt_packet_decoder *decoder) {
+ for (;;) {
+ int errcode;
+
+ errcode = pt_pkt_sync_forward(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ do {
+ errcode = decode(decoder);
+ } while (errcode >= 0);
+ }
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_pkt_alloc_decoder**(3), **pt_pkt_free_decoder**(3),
+**pt_pkt_get_offset**(3), **pt_pkt_get_sync_offset**(3),
+**pt_pkt_get_config**(3), **pt_pkt_next**(3)
diff --git a/doc/man/pt_qry_alloc_decoder.3.md b/doc/man/pt_qry_alloc_decoder.3.md
new file mode 100644
index 000000000000..7f9d9d4d3ae0
--- /dev/null
+++ b/doc/man/pt_qry_alloc_decoder.3.md
@@ -0,0 +1,113 @@
+% PT_QRY_ALLOC_DECODER(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_qry_alloc_decoder, pt_qry_free_decoder - allocate/free an Intel(R) Processor
+Trace query decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **struct pt_query_decoder \***
+| **pt_qry_alloc_decoder(const struct pt_config \**config*);**
+|
+| **void pt_qry_free_decoder(struct pt_query_decoder \**decoder*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+A query decoder decodes raw Intel Processor Trace (Intel PT) and provides
+functions for querying the decoder about:
+
+ - whether the next conditional branch was taken (or not taken)
+ - the destination of the next indirect branch
+
+This information can be used to reconstruct the execution flow of the traced
+code. As long as the flow is clear, follow the flow. If the flow cannot be
+determined by examining the current instruction, ask the query decoder.
+
+In addition, the query decoder indicates asynchronous events via the return
+value of its query functions and provides an additional function to query for
+such asynchronous events. See **pt_qry_cond_branch**(3),
+**pt_qry_indirect_branch**(3), and **pt_qry_event**(3).
+
+**pt_qry_alloc_decoder**() allocates a new query decoder and returns a pointer
+to it. The *config* argument points to a *pt_config* object. See
+**pt_config**(3). The *config* argument will not be referenced by the returned
+decoder but the trace buffer defined by the *config* argument's *begin* and
+*end* fields will.
+
+The returned query decoder needs to be synchronized onto the trace stream
+before it can be used. To synchronize the query decoder, use
+**pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or
+**pt_qry_sync_set**(3).
+
+**pt_qry_free_decoder**() frees the Intel PT query decoder pointed to by
+*decoder*. The *decoder* argument must be NULL or point to a decoder that has
+been allocated by a call to **pt_qry_alloc_decoder**().
+
+
+# RETURN VALUE
+
+**pt_qry_alloc_decoder**() returns a pointer to a *pt_query_decoder* object on
+success or NULL in case of an error.
+
+
+# EXAMPLE
+
+~~~{.c}
+int foo(const struct pt_config *config) {
+ struct pt_query_decoder *decoder;
+ int errcode;
+
+ decoder = pt_qry_alloc_decoder(config);
+ if (!decoder)
+ return pte_nomem;
+
+ errcode = bar(decoder);
+
+ pt_qry_free_decoder(decoder);
+ return errcode;
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_config**(3), **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3),
+**pt_qry_sync_set**(3), **pt_qry_get_offset**(3), **pt_qry_get_sync_offset**(3),
+**pt_qry_get_config**(3), **pt_qry_cond_branch**(3),
+**pt_qry_indirect_branch**(3), **pt_qry_event**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_cond_branch.3.md b/doc/man/pt_qry_cond_branch.3.md
new file mode 100644
index 000000000000..60df1db6f0f1
--- /dev/null
+++ b/doc/man/pt_qry_cond_branch.3.md
@@ -0,0 +1,152 @@
+% PT_QRY_COND_BRANCH(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_qry_cond_branch, pt_qry_indirect_branch - query an Intel(R) Processor Trace
+query decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_cond_branch(struct pt_query_decoder \**decoder*,**
+| **int \**taken*);**
+| **int pt_qry_indirect_branch(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_qry_cond_branch**() uses Intel Processor Trace (Intel PT) to determine
+whether the next conditional branch in the traced code was taken or was not
+taken. The *decoder* argument must point to an Intel PT query decoder.
+
+On success, sets the variable pointed to by the *taken* argument to a non-zero
+value if the next conditional branch is taken and to zero if it is not taken.
+
+**pt_qry_indirect_branch**() uses Intel Processor Trace (Intel PT) to determine
+the destination virtual address of the next indirect branch in the traced code.
+
+On success, provides the destination address in the integer variable pointed to
+by the *ip* argument. If the destination address has been suppressed in the
+Intel PT trace, the lack of an IP is indicated in the return value by setting
+the *pts_ip_suppressed* bit.
+
+
+# RETURN VALUE
+
+Both functions return zero or a positive value on success or a negative
+*pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* argument or the *taken* (**pt_qry_cond_branch**()) or *ip*
+ (**pt_qry_indirect_branch**()) argument is NULL.
+
+pte_eos
+: Decode reached the end of the trace stream.
+
+pte_nosync
+: The decoder has not been synchronized onto the trace stream. Use
+ **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or
+ **pt_qry_sync_set**(3) to synchronize *decoder*.
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+pte_bad_query
+: The query does not match the data provided in the Intel PT stream. Based on
+ the trace, the decoder expected a call to the other query function or a call
+ to **pt_qry_event**(3). This usually means that execution flow
+ reconstruction and trace got out of sync.
+
+
+# EXAMPLE
+
+The following example sketches an execution flow reconstruction loop.
+Asynchronous events have been omitted.
+
+~~~{.c}
+int foo(struct pt_query_decoder *decoder, uint64_t ip) {
+ for (;;) {
+ if (insn_is_cond_branch(ip)) {
+ int errcode, taken;
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ if (errcode < 0)
+ return errcode;
+
+ if (taken)
+ ip = insn_destination(ip);
+ else
+ ip = insn_next_ip(ip);
+ } else if (insn_is_indirect_branch(ip)) {
+ int errcode;
+
+ errcode = pt_qry_indirect_branch(decoder, &ip);
+ if (errcode < 0)
+ return errcode;
+ } else
+ ip = insn_next_ip(ip);
+ }
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_event**(3), **pt_qry_time**(3), **pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_event.3.md b/doc/man/pt_qry_event.3.md
new file mode 100644
index 000000000000..5eb4df82de18
--- /dev/null
+++ b/doc/man/pt_qry_event.3.md
@@ -0,0 +1,291 @@
+% PT_QRY_EVENT(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_qry_event, pt_insn_event, pt_blk_event - query an Intel(R) Processor Trace
+decoder for an asynchronous event
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_event(struct pt_query_decoder \**decoder*,**
+| **struct pt_event \**event*, size_t *size*);**
+|
+| **int pt_insn_event(struct pt_insn_decoder \**decoder*,**
+| **struct pt_event \**event*, size_t *size*);**
+|
+| **int pt_blk_event(struct pt_block_decoder \**decoder*,**
+| **struct pt_event \**event*, size_t *size*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_qry_event**(), **pt_insn_event**(), and **pt_blk_event**() provide the next
+pending asynchronous event in *decoder*'s Intel Processor Trace (Intel PT)
+decode in the *pt_event* object pointed to by the *event* argument.
+
+The *size* argument must be set to *sizeof(struct pt_event)*. The function will
+provide at most *size* bytes of the *pt_event* structure. A newer decoder
+library may provide event types that are not yet defined. Those events may be
+truncated.
+
+On success, detailed information about the event is provided in the *pt_event*
+object pointed to by the *event* argument. The *pt_event* structure is declared
+as:
+
+~~~{.c}
+/** An event. */
+struct pt_event {
+ /** The type of the event. */
+ enum pt_event_type type;
+
+ /** A flag indicating that the event IP has been
+ * suppressed.
+ */
+ uint32_t ip_suppressed:1;
+
+ /** A flag indicating that the event is for status update. */
+ uint32_t status_update:1;
+
+ /** A flag indicating that the event has timing
+ * information.
+ */
+ uint32_t has_tsc:1;
+
+ /** The time stamp count of the event.
+ *
+ * This field is only valid if \@has_tsc is set.
+ */
+ uint64_t tsc;
+
+ /** The number of lost mtc and cyc packets.
+ *
+ * This gives an idea about the quality of the \@tsc. The
+ * more packets were dropped, the less precise timing is.
+ */
+ uint32_t lost_mtc;
+ uint32_t lost_cyc;
+
+ /* Reserved space for future extensions. */
+ uint64_t reserved[2];
+
+ /** Event specific data. */
+ union {
+ /** Event: enabled. */
+ struct {
+ /** The address at which tracing resumes. */
+ uint64_t ip;
+
+ /** A flag indicating that tracing resumes from the IP
+ * at which tracing had been disabled before.
+ */
+ uint32_t resumed:1;
+ } enabled;
+
+ /** Event: disabled. */
+ struct {
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /* The exact source ip needs to be determined using
+ * disassembly and the filter configuration.
+ */
+ } disabled;
+
+ [...]
+ } variant;
+};
+~~~
+
+See the *intel-pt.h* header file for more detail. The common fields of the
+*pt_event* structure are described in more detail below:
+
+type
+: The type of the event as a *pt_event_type* enumeration, which is declared
+ as:
+
+~~~{.c}
+/** Event types. */
+enum pt_event_type {
+ /* Tracing has been enabled/disabled. */
+ ptev_enabled,
+ ptev_disabled,
+
+ /* Tracing has been disabled asynchronously. */
+ ptev_async_disabled,
+
+ /* An asynchronous branch, e.g. interrupt. */
+ ptev_async_branch,
+
+ /* A synchronous paging event. */
+ ptev_paging,
+
+ /* An asynchronous paging event. */
+ ptev_async_paging,
+
+ /* Trace overflow. */
+ ptev_overflow,
+
+ /* An execution mode change. */
+ ptev_exec_mode,
+
+ /* A transactional execution state change. */
+ ptev_tsx,
+
+ /* Trace Stop. */
+ ptev_stop,
+
+ /* A synchronous vmcs event. */
+ ptev_vmcs,
+
+ /* An asynchronous vmcs event. */
+ ptev_async_vmcs,
+
+ /* Execution has stopped. */
+ ptev_exstop,
+
+ /* An MWAIT operation completed. */
+ ptev_mwait,
+
+ /* A power state was entered. */
+ ptev_pwre,
+
+ /* A power state was exited. */
+ ptev_pwrx,
+
+ /* A PTWRITE event. */
+ ptev_ptwrite,
+
+ /* A timing event. */
+ ptev_tick,
+
+ /* A core:bus ratio event. */
+ ptev_cbr,
+
+ /* A maintenance event. */
+ ptev_mnt
+};
+~~~
+
+ip_suppressed
+: A flag indicating whether the *ip* field in the event-dependent part is not
+ valid because the value has been suppressed in the trace.
+
+status_update
+: A flag indicating whether the event is for updating the decoder's status.
+ Status update events originate from Intel PT packets in PSB+.
+
+has_tsc
+: A flag indicating that the event's timing-related fields *tsc*, *lost_mtc*,
+ and *lost_cyc* are valid.
+
+tsc
+: The last time stamp count before the event. Depending on the timing
+ configuration, the timestamp can be more or less precise. For
+ cycle-accurate tracing, event packets are typically CYC-eligible so the
+ timestamp should be cycle-accurate.
+
+lost_mtc, lost_cyc
+: The number of lost MTC and CYC updates. An update is lost if the decoder
+ was not able to process an MTC or CYC packet due to missing information.
+ This can be either missing calibration or missing configuration information.
+ The number of lost MTC and CYC updates gives a rough idea about the quality
+ of the *tsc* field.
+
+variant
+: This field contains event-specific information. See the *intel-pt.h* header
+ file for details.
+
+
+# RETURN VALUE
+
+**pt_qry_event**(), **pt_insn_event**(), and **pt_blk_event**() return zero or a
+positive value on success or a negative *pt_error_code* enumeration constant in
+case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *event* argument is NULL or the *size* argument is too
+ small.
+
+pte_eos
+: Decode reached the end of the trace stream.
+
+pte_nosync
+: The decoder has not been synchronized onto the trace stream. Use
+ **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or
+ **pt_qry_sync_set**(3) to synchronize *decoder*.
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+pte_bad_query
+: The query does not match the data provided in the Intel PT stream. Based on
+ the trace, the decoder expected a call to **pt_qry_cond_branch**(3) or
+ **pt_qry_indirect_branch**(3). This usually means that execution flow
+ reconstruction and trace got out of sync.
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_cond_branch**(3), **pt_qry_indirect_branch**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3), **pt_insn_next**(3), **pt_blk_next**(3)
diff --git a/doc/man/pt_qry_get_offset.3.md b/doc/man/pt_qry_get_offset.3.md
new file mode 100644
index 000000000000..cc74f2c91e84
--- /dev/null
+++ b/doc/man/pt_qry_get_offset.3.md
@@ -0,0 +1,83 @@
+% PT_QRY_GET_OFFSET(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_qry_get_offset, pt_qry_get_sync_offset - get an Intel(R) Processor Trace
+query decoder's current/synchronization trace buffer offset
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_get_offset(const struct pt_query_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+| **int pt_qry_get_sync_offset(const struct pt_query_decoder \**decoder*,**
+| **uint64_t \**offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_qry_get_offset**() provides *decoder*'s current position as offset in bytes
+from the beginning of *decoder*'s trace buffer in the unsigned integer variable
+pointed to by *offset*.
+
+**pt_qry_get_sync_offset**() provides *decoder*'s last synchronization point as
+offset in bytes from the beginning of *decoder*'s trace buffer in the unsigned
+integer variable pointed to by *offset*.
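+
+The following example sketches how the two offsets might be combined to
+compute how far *decoder* has progressed past its last synchronization point:
+
+~~~{.c}
+int foo(const struct pt_query_decoder *decoder, uint64_t *delta) {
+	uint64_t offset, sync;
+	int errcode;
+
+	errcode = pt_qry_get_offset(decoder, &offset);
+	if (errcode < 0)
+		return errcode;
+
+	errcode = pt_qry_get_sync_offset(decoder, &sync);
+	if (errcode < 0)
+		return errcode;
+
+	*delta = offset - sync;
+	return 0;
+}
+~~~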
+
+
+# RETURN VALUE
+
+Both functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *offset* argument is NULL.
+
+pte_nosync
+: *decoder* has not been synchronized onto the trace stream. Use
+ **pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3), or
+ **pt_qry_sync_set**(3) to synchronize *decoder*.
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_sync_forward**(3), **pt_qry_sync_backward**(3),
+**pt_qry_sync_set**(3), **pt_qry_get_config**(3), **pt_qry_cond_branch**(3),
+**pt_qry_indirect_branch**(3), **pt_qry_event**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_sync_forward.3.md b/doc/man/pt_qry_sync_forward.3.md
new file mode 100644
index 000000000000..c82301adc214
--- /dev/null
+++ b/doc/man/pt_qry_sync_forward.3.md
@@ -0,0 +1,152 @@
+% PT_QRY_SYNC_FORWARD(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_qry_sync_forward, pt_qry_sync_backward, pt_qry_sync_set - synchronize an
+Intel(R) Processor Trace query decoder
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_sync_forward(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*);**
+| **int pt_qry_sync_backward(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*);**
+| **int pt_qry_sync_set(struct pt_query_decoder \**decoder*,**
+| **uint64_t \**ip*, uint64_t *offset*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+These functions synchronize an Intel Processor Trace (Intel PT) query decoder
+pointed to by *decoder* onto the trace stream in *decoder*'s trace buffer.
+
+They search for a Packet Stream Boundary (PSB) packet in the trace stream and,
+if successful, set *decoder*'s current position and synchronization position to
+that packet and start processing packets. For synchronization to be
+successful, there must be a full PSB+ header in the trace stream.
+
+If the *ip* argument is not NULL, these functions provide the code memory
+address at which tracing starts in the variable pointed to by *ip*. If tracing
+is disabled at the synchronization point, the lack of an IP is indicated in the
+return value by setting the *pts_ip_suppressed* bit.
+
+**pt_qry_sync_forward**() searches in forward direction from *decoder*'s current
+position towards the end of the trace buffer. If *decoder* has been newly
+allocated and has not been synchronized yet, the search starts from the
+beginning of the trace.
+
+**pt_qry_sync_backward**() searches in backward direction from *decoder*'s
+current position towards the beginning of the trace buffer. If *decoder* has
+been newly allocated and has not been synchronized yet, the search starts from
+the end of the trace.
+
+**pt_qry_sync_set**() searches at *offset* bytes from the beginning of its trace
+buffer.
+
+
+# RETURN VALUE
+
+All synchronization functions return zero or a positive value on success or a
+negative *pt_error_code* enumeration constant in case of an error.
+
+On success, a bit-vector of *pt_status_flag* enumeration constants is returned.
+The *pt_status_flag* enumeration is declared as:
+
+~~~{.c}
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+~~~
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* argument is NULL.
+
+pte_eos
+: There is no (further) PSB+ header in the trace stream
+ (**pt_qry_sync_forward**() and **pt_qry_sync_backward**()) or at *offset*
+ bytes into the trace buffer (**pt_qry_sync_set**()).
+
+pte_nosync
+: There is no PSB packet at *offset* bytes from the beginning of the trace
+ (**pt_qry_sync_set**() only).
+
+pte_bad_opc
+: The decoder encountered an unsupported Intel PT packet opcode.
+
+pte_bad_packet
+: The decoder encountered an unsupported Intel PT packet payload.
+
+
+# EXAMPLE
+
+The following example re-synchronizes an Intel PT query decoder after decode
+errors:
+
+~~~{.c}
+int foo(struct pt_query_decoder *decoder) {
+ for (;;) {
+ int errcode;
+
+ errcode = pt_qry_sync_forward(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ do {
+ errcode = decode(decoder);
+ } while (errcode >= 0);
+ }
+}
+~~~
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_get_offset**(3), **pt_qry_get_sync_offset**(3),
+**pt_qry_get_config**(3), **pt_qry_cond_branch**(3),
+**pt_qry_indirect_branch**(3), **pt_qry_event**(3), **pt_qry_time**(3),
+**pt_qry_core_bus_ratio**(3)
diff --git a/doc/man/pt_qry_time.3.md b/doc/man/pt_qry_time.3.md
new file mode 100644
index 000000000000..8bbfa4c8a546
--- /dev/null
+++ b/doc/man/pt_qry_time.3.md
@@ -0,0 +1,128 @@
+% PT_QRY_TIME(3)
+
+<!---
+ ! Copyright (c) 2015-2018, Intel Corporation
+ !
+ ! Redistribution and use in source and binary forms, with or without
+ ! modification, are permitted provided that the following conditions are met:
+ !
+ ! * Redistributions of source code must retain the above copyright notice,
+ ! this list of conditions and the following disclaimer.
+ ! * Redistributions in binary form must reproduce the above copyright notice,
+ ! this list of conditions and the following disclaimer in the documentation
+ ! and/or other materials provided with the distribution.
+ ! * Neither the name of Intel Corporation nor the names of its contributors
+ ! may be used to endorse or promote products derived from this software
+ ! without specific prior written permission.
+ !
+ ! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ ! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ ! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ! ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ ! LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ ! CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ ! SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ ! INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ ! CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ! ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ ! POSSIBILITY OF SUCH DAMAGE.
+ !-->
+
+# NAME
+
+pt_qry_time, pt_qry_core_bus_ratio, pt_insn_time, pt_insn_core_bus_ratio,
+pt_blk_time, pt_blk_core_bus_ratio - query an Intel(R) Processor Trace decoder
+for timing information
+
+
+# SYNOPSIS
+
+| **\#include `<intel-pt.h>`**
+|
+| **int pt_qry_time(struct pt_query_decoder \**decoder*, uint64_t \**time*,**
+| **uint32_t \**lost_mtc*, uint32_t \**lost_cyc*);**
+| **int pt_qry_core_bus_ratio(struct pt_query_decoder \**decoder*,**
+| **uint32_t \**cbr*);**
+|
+| **int pt_insn_time(struct pt_insn_decoder \**decoder*, uint64_t \**time*,**
+| **uint32_t \**lost_mtc*, uint32_t \**lost_cyc*);**
+| **int pt_insn_core_bus_ratio(struct pt_insn_decoder \**decoder*,**
+| **uint32_t \**cbr*);**
+|
+| **int pt_blk_time(struct pt_block_decoder \**decoder*, uint64_t \**time*,**
+| **uint32_t \**lost_mtc*, uint32_t \**lost_cyc*);**
+| **int pt_blk_core_bus_ratio(struct pt_block_decoder \**decoder*,**
+| **uint32_t \**cbr*);**
+
+Link with *-lipt*.
+
+
+# DESCRIPTION
+
+**pt_qry_time**(), **pt_insn_time**(), and **pt_blk_time**() provide the current
+estimated timestamp count (TSC) value in the unsigned integer variable pointed
+to by the *time* argument. The returned value corresponds to what an **rdtsc**
+instruction would have returned.
+
+At configurable intervals, Intel PT contains the full, accurate TSC value.
+Between those intervals, the timestamp count is estimated using a collection of
+lower-bandwidth packets, the Mini Time Counter (MTC) packet and the Cycle Count
+Packet (CYC). Depending on the Intel PT configuration, timing can be very
+precise at the cost of increased bandwidth or less precise but requiring lower
+bandwidth.
+
+The decoder needs to be calibrated in order to translate Cycle Counter ticks
+into Core Crystal Clock ticks. Without calibration, CYC packets need to be
+dropped. The decoder calibrates itself using MTC, CYC, and CBR packets.
+
+To interpret MTC and CYC packets, the decoder needs additional information
+provided in respective fields in the *pt_config* structure. Lacking this
+information, MTC packets may need to be dropped. This will impact the precision
+of the estimated timestamp count by losing periodic updates and it will impact
+calibration, which may result in reduced precision for cycle-accurate timing.
+
+The number of dropped MTC and CYC packets gives a rough idea about the quality
+of the estimated timestamp count. It is provided in the unsigned integer
+variables pointed to by the *lost_mtc* and *lost_cyc* arguments, respectively.
+If one or both of the arguments is NULL, no information on lost packets is
+provided for the respective packet type.
+
+**pt_qry_core_bus_ratio**(), **pt_insn_core_bus_ratio**(), and
+**pt_blk_core_bus_ratio**() give the last known core:bus ratio as provided by
+the Core Bus Ratio (CBR) Intel PT packet.
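+
+The following example sketches a timing query that also inspects the number of
+lost MTC and CYC updates:
+
+~~~{.c}
+int foo(struct pt_query_decoder *decoder) {
+	uint64_t time;
+	uint32_t lost_mtc, lost_cyc;
+	int errcode;
+
+	errcode = pt_qry_time(decoder, &time, &lost_mtc, &lost_cyc);
+	if (errcode < 0)
+		return errcode;
+
+	[...]
+
+	return 0;
+}
+~~~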
+
+
+# RETURN VALUE
+
+All functions return zero on success or a negative *pt_error_code* enumeration
+constant in case of an error.
+
+
+# ERRORS
+
+pte_invalid
+: The *decoder* or *time* (**pt_qry_time**(), **pt_insn_time**(), and
+ **pt_blk_time**()) or *cbr* (**pt_qry_core_bus_ratio**(),
+ **pt_insn_core_bus_ratio**(), and **pt_blk_core_bus_ratio**()) argument is
+ NULL.
+
+pte_no_time
+: There has not been a TSC packet to provide the full, accurate Time Stamp
+ Count. There may have been MTC or CYC packets, so the provided *time* may
+ be non-zero. It is zero if there has not been any timing packet yet.
+
+ Depending on the Intel PT configuration, TSC packets may not have been
+ enabled. In this case, the *time* value provides the relative time based on
+ other timing packets.
+
+pte_no_cbr
+: There has not been a CBR packet to provide the core:bus ratio. The *cbr*
+ value is undefined in this case.
+
+
+# SEE ALSO
+
+**pt_qry_alloc_decoder**(3), **pt_qry_free_decoder**(3),
+**pt_qry_cond_branch**(3), **pt_qry_indirect_branch**(3), **pt_qry_event**(3),
+**pt_insn_alloc_decoder**(3), **pt_insn_free_decoder**(3), **pt_insn_next**(3),
+**pt_blk_alloc_decoder**(3), **pt_blk_free_decoder**(3), **pt_blk_next**(3)
diff --git a/include/posix/threads.h b/include/posix/threads.h
new file mode 100644
index 000000000000..a9dcf05f757a
--- /dev/null
+++ b/include/posix/threads.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * It looks like there is still no support for C11's threads.h.
+ *
+ * We implement the few features we actually need hoping that this file will
+ * soon go away.
+ */
+
+#ifndef THREADS_H
+#define THREADS_H
+
+#include <pthread.h>
+
+#ifndef PTHREAD_MUTEX_NORMAL
+# define PTHREAD_MUTEX_NORMAL PTHREAD_MUTEX_TIMED_NP
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+
+enum {
+ thrd_success = 1,
+ thrd_error
+};
+
+struct pt_thread {
+ pthread_t thread;
+};
+typedef struct pt_thread thrd_t;
+
+typedef int (*thrd_start_t)(void *);
+
+
+struct thrd_args {
+ thrd_start_t fun;
+ void *arg;
+};
+
+static void *thrd_routine(void *arg)
+{
+ struct thrd_args *args;
+ int result;
+
+ args = arg;
+ if (!args)
+ return (void *) (intptr_t) -1;
+
+ result = -1;
+ if (args->fun)
+ result = args->fun(args->arg);
+
+ free(args);
+
+ return (void *) (intptr_t) result;
+}
+
+static inline int thrd_create(thrd_t *thrd, thrd_start_t fun, void *arg)
+{
+ struct thrd_args *args;
+ int errcode;
+
+ if (!thrd || !fun)
+ return thrd_error;
+
+ args = malloc(sizeof(*args));
+ if (!args)
+ return thrd_error;
+
+ args->fun = fun;
+ args->arg = arg;
+
+ errcode = pthread_create(&thrd->thread, NULL, thrd_routine, args);
+ if (errcode) {
+ free(args);
+ return thrd_error;
+ }
+
+ return thrd_success;
+}
+
+static inline int thrd_join(thrd_t *thrd, int *res)
+{
+ void *result;
+ int errcode;
+
+ if (!thrd)
+ return thrd_error;
+
+ errcode = pthread_join(thrd->thread, &result);
+ if (errcode)
+ return thrd_error;
+
+ if (res)
+ *res = (int) (intptr_t) result;
+
+ return thrd_success;
+}
+
+
+struct pt_mutex {
+ pthread_mutex_t mutex;
+};
+typedef struct pt_mutex mtx_t;
+
+enum {
+ mtx_plain = PTHREAD_MUTEX_NORMAL
+};
+
+static inline int mtx_init(mtx_t *mtx, int type)
+{
+ int errcode;
+
+ if (!mtx || type != mtx_plain)
+ return thrd_error;
+
+ errcode = pthread_mutex_init(&mtx->mutex, NULL);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline void mtx_destroy(mtx_t *mtx)
+{
+ if (mtx)
+ (void) pthread_mutex_destroy(&mtx->mutex);
+}
+
+static inline int mtx_lock(mtx_t *mtx)
+{
+ int errcode;
+
+ if (!mtx)
+ return thrd_error;
+
+ errcode = pthread_mutex_lock(&mtx->mutex);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int mtx_unlock(mtx_t *mtx)
+{
+ int errcode;
+
+ if (!mtx)
+ return thrd_error;
+
+ errcode = pthread_mutex_unlock(&mtx->mutex);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+
+struct pt_cond {
+ pthread_cond_t cond;
+};
+typedef struct pt_cond cnd_t;
+
+static inline int cnd_init(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_init(&cnd->cond, NULL);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_destroy(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_destroy(&cnd->cond);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_signal(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_signal(&cnd->cond);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_broadcast(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_broadcast(&cnd->cond);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_wait(cnd_t *cnd, mtx_t *mtx)
+{
+ int errcode;
+
+ if (!cnd || !mtx)
+ return thrd_error;
+
+ errcode = pthread_cond_wait(&cnd->cond, &mtx->mutex);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+#endif /* THREADS_H */
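
Editorial illustration (not part of the imported file): a minimal sketch of how this C11-style shim is meant to be used, assuming the shim's "threads.h" is found via the build's include path; the worker function is hypothetical.

~~~{.c}
#include "threads.h"	/* the shim above */

#include <stdio.h>

/* A hypothetical worker that matches thrd_start_t. */
static int worker(void *arg)
{
	int *value = (int *) arg;

	return *value + 1;
}

int main(void)
{
	thrd_t thread;
	int arg, result;

	arg = 41;
	if (thrd_create(&thread, worker, &arg) != thrd_success)
		return 1;

	/* Note that, unlike C11, this shim's thrd_join takes a pointer. */
	if (thrd_join(&thread, &result) != thrd_success)
		return 1;

	printf("worker returned %d\n", result);
	return 0;
}
~~~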
diff --git a/include/pt_compiler.h b/include/pt_compiler.h
new file mode 100644
index 000000000000..611703f25928
--- /dev/null
+++ b/include/pt_compiler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_COMPILER_H
+#define PT_COMPILER_H
+
+
+/* Both the relevant Clang and GCC versions support this. */
+#if !defined(__has_attribute)
+# define __has_attribute(attr) 0
+#endif
+
+#if !defined(fallthrough)
+# if (__has_attribute(fallthrough))
+# define fallthrough __attribute__((fallthrough))
+# else
+# define fallthrough /* Fall through. */
+# endif
+#endif /* !defined(fallthrough) */
+
+
+#endif /* PT_COMPILER_H */
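
Editorial illustration (not part of the imported file): a sketch of how the fallthrough macro annotates an intentional switch fall-through; the helper function below is hypothetical.

~~~{.c}
#include "pt_compiler.h"

/* Hypothetical helper: number of steps left, counted by falling through. */
static int steps_remaining(int done)
{
	int remaining = 0;

	switch (done) {
	case 0:
		remaining += 1;
		fallthrough;

	case 1:
		remaining += 1;
		fallthrough;

	case 2:
		remaining += 1;
		break;

	default:
		break;
	}

	return remaining;
}
~~~

With compilers that support the attribute this silences -Wimplicit-fallthrough; otherwise the macro expands to nothing and the code is unchanged.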
diff --git a/include/windows/inttypes.h b/include/windows/inttypes.h
new file mode 100644
index 000000000000..3659f361f1b4
--- /dev/null
+++ b/include/windows/inttypes.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifndef PRId64
+# define PRId64 "lld"
+#endif
+#ifndef PRIu64
+# define PRIu64 "llu"
+#endif
+#ifndef PRIx64
+# define PRIx64 "llx"
+#endif
+
+#ifndef PRId32
+# define PRId32 "d"
+#endif
+#ifndef PRIu32
+# define PRIu32 "u"
+#endif
+#ifndef PRIx32
+# define PRIx32 "x"
+#endif
+
+#ifndef PRIu16
+# define PRIu16 "u"
+#endif
+
+#ifndef PRIu8
+# define PRIu8 "u"
+#endif
+#ifndef PRIx8
+# define PRIx8 "x"
+#endif
+
+#ifndef SCNx64
+# define SCNx64 "llx"
+#endif
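
Editorial illustration (not part of the imported file): the printf/scanf usage these fallback definitions enable on toolchains without a complete <inttypes.h>.

~~~{.c}
#include <inttypes.h>	/* resolves to this fallback on such toolchains */

#include <stdio.h>

int main(void)
{
	uint64_t offset;
	uint32_t count;

	offset = 0x1000;
	count = 42;

	/* The PRI* macros supply the correct length modifiers. */
	printf("offset: 0x%" PRIx64 ", count: %" PRIu32 "\n", offset, count);

	/* SCNx64 is the matching scanf conversion for a hex uint64_t. */
	(void) sscanf("1000", "%" SCNx64, &offset);

	return 0;
}
~~~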
diff --git a/include/windows/threads.h b/include/windows/threads.h
new file mode 100644
index 000000000000..9e57b81d4ca7
--- /dev/null
+++ b/include/windows/threads.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * It looks like there is still no support for C11's threads.h.
+ *
+ * We implement the few features we actually need, hoping that this file will
+ * soon go away.
+ */
+
+#ifndef THREADS_H
+#define THREADS_H
+
+#include "windows.h"
+
+
+enum {
+ thrd_success = 1,
+ thrd_error
+};
+
+
+struct pt_thread {
+ HANDLE handle;
+};
+typedef struct pt_thread thrd_t;
+
+typedef int (*thrd_start_t)(void *);
+
+
+struct thrd_args {
+ thrd_start_t fun;
+ void *arg;
+};
+
+static DWORD WINAPI thrd_routine(void *arg)
+{
+ struct thrd_args *args;
+ int result;
+
+ args = (struct thrd_args *) arg;
+ if (!args)
+ return (DWORD) -1;
+
+ result = -1;
+ if (args->fun)
+ result = args->fun(args->arg);
+
+ free(args);
+
+ return (DWORD) result;
+}
+
+static inline int thrd_create(thrd_t *thrd, thrd_start_t fun, void *arg)
+{
+ struct thrd_args *args;
+ HANDLE handle;
+
+ if (!thrd || !fun)
+ return thrd_error;
+
+ args = malloc(sizeof(*args));
+ if (!args)
+ return thrd_error;
+
+ args->fun = fun;
+ args->arg = arg;
+
+ handle = CreateThread(NULL, 0, thrd_routine, args, 0, NULL);
+ if (!handle) {
+ free(args);
+ return thrd_error;
+ }
+
+ thrd->handle = handle;
+ return thrd_success;
+}
+
+static inline int thrd_join(thrd_t *thrd, int *res)
+{
+ DWORD status;
+ BOOL success;
+
+ if (!thrd)
+ return thrd_error;
+
+ status = WaitForSingleObject(thrd->handle, INFINITE);
+ if (status)
+ return thrd_error;
+
+ if (res) {
+ DWORD result;
+
+ success = GetExitCodeThread(thrd->handle, &result);
+ if (!success) {
+ (void) CloseHandle(thrd->handle);
+ return thrd_error;
+ }
+
+ *res = (int) result;
+ }
+
+ success = CloseHandle(thrd->handle);
+ if (!success)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+struct pt_mutex {
+ CRITICAL_SECTION cs;
+};
+typedef struct pt_mutex mtx_t;
+
+enum {
+ mtx_plain
+};
+
+static inline int mtx_init(mtx_t *mtx, int type)
+{
+ if (!mtx || type != mtx_plain)
+ return thrd_error;
+
+ InitializeCriticalSection(&mtx->cs);
+
+ return thrd_success;
+}
+
+static inline void mtx_destroy(mtx_t *mtx)
+{
+ if (mtx)
+ DeleteCriticalSection(&mtx->cs);
+}
+
+static inline int mtx_lock(mtx_t *mtx)
+{
+ if (!mtx)
+ return thrd_error;
+
+ EnterCriticalSection(&mtx->cs);
+
+ return thrd_success;
+}
+
+static inline int mtx_unlock(mtx_t *mtx)
+{
+ if (!mtx)
+ return thrd_error;
+
+ LeaveCriticalSection(&mtx->cs);
+
+ return thrd_success;
+}
+
+
+struct pt_cond {
+ CONDITION_VARIABLE cond;
+};
+typedef struct pt_cond cnd_t;
+
+static inline int cnd_init(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ InitializeConditionVariable(&cnd->cond);
+
+ return thrd_success;
+}
+
+static inline int cnd_destroy(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ /* Nothing to do. */
+
+ return thrd_success;
+}
+
+static inline int cnd_signal(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ WakeConditionVariable(&cnd->cond);
+
+ return thrd_success;
+}
+
+static inline int cnd_broadcast(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ WakeAllConditionVariable(&cnd->cond);
+
+ return thrd_success;
+}
+
+static inline int cnd_wait(cnd_t *cnd, mtx_t *mtx)
+{
+ BOOL success;
+
+ if (!cnd || !mtx)
+ return thrd_error;
+
+ success = SleepConditionVariableCS(&cnd->cond, &mtx->cs, INFINITE);
+ if (!success)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+#endif /* THREADS_H */
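
Editorial illustration (not part of the imported file): the usual predicate loop these mutex/condition wrappers support on either platform; the ready flag and both functions are hypothetical, and mtx_init(&lock, mtx_plain) and cnd_init(&cond) are assumed to have been called once beforehand.

~~~{.c}
#include "threads.h"	/* either shim, selected via the include path */

static mtx_t lock;
static cnd_t cond;
static int ready;

/* Producer: set the flag and wake one waiter. */
static int publish(void)
{
	if (mtx_lock(&lock) != thrd_success)
		return thrd_error;

	ready = 1;
	(void) cnd_signal(&cond);

	return mtx_unlock(&lock);
}

/* Consumer: wait until the flag is set, then clear it. */
static int consume(void)
{
	if (mtx_lock(&lock) != thrd_success)
		return thrd_error;

	while (!ready) {
		if (cnd_wait(&cond, &lock) != thrd_success) {
			(void) mtx_unlock(&lock);
			return thrd_error;
		}
	}

	ready = 0;

	return mtx_unlock(&lock);
}
~~~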
diff --git a/libipt/CMakeLists.txt b/libipt/CMakeLists.txt
new file mode 100644
index 000000000000..726bdfe0c869
--- /dev/null
+++ b/libipt/CMakeLists.txt
@@ -0,0 +1,172 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+include_directories(
+ internal/include
+)
+
+set(LIBIPT_SECTION_FILES
+ src/pt_section.c
+ src/pt_section_file.c
+)
+
+set(LIBIPT_FILES
+ src/pt_error.c
+ src/pt_packet_decoder.c
+ src/pt_query_decoder.c
+ src/pt_encoder.c
+ src/pt_sync.c
+ src/pt_version.c
+ src/pt_last_ip.c
+ src/pt_tnt_cache.c
+ src/pt_ild.c
+ src/pt_image.c
+ src/pt_image_section_cache.c
+ src/pt_retstack.c
+ src/pt_insn_decoder.c
+ src/pt_time.c
+ src/pt_asid.c
+ src/pt_event_queue.c
+ src/pt_packet.c
+ src/pt_decoder_function.c
+ src/pt_config.c
+ src/pt_insn.c
+ src/pt_block_decoder.c
+ src/pt_block_cache.c
+ src/pt_msec_cache.c
+)
+
+if (CMAKE_HOST_UNIX)
+ include_directories(
+ internal/include/posix
+ )
+
+ set(LIBIPT_FILES ${LIBIPT_FILES} src/posix/init.c)
+ set(LIBIPT_SECTION_FILES ${LIBIPT_SECTION_FILES} src/posix/pt_section_posix.c)
+endif (CMAKE_HOST_UNIX)
+
+if (CMAKE_HOST_WIN32)
+ add_definitions(
+ # export libipt symbols
+ #
+ /Dpt_export=__declspec\(dllexport\)
+ )
+
+ include_directories(
+ internal/include/windows
+ )
+
+ set(LIBIPT_FILES ${LIBIPT_FILES} src/windows/init.c)
+ set(LIBIPT_SECTION_FILES ${LIBIPT_SECTION_FILES} src/windows/pt_section_windows.c)
+endif (CMAKE_HOST_WIN32)
+
+set(LIBIPT_FILES ${LIBIPT_FILES} ${LIBIPT_SECTION_FILES})
+
+add_library(libipt SHARED
+ ${LIBIPT_FILES}
+)
+
+# put the version into the intel-pt header
+#
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/include/intel-pt.h.in
+ ${CMAKE_CURRENT_BINARY_DIR}/include/intel-pt.h
+)
+
+set_target_properties(libipt PROPERTIES
+ PREFIX ""
+ PUBLIC_HEADER ${CMAKE_CURRENT_BINARY_DIR}/include/intel-pt.h
+ VERSION ${PT_VERSION}
+ SOVERSION ${PT_VERSION_MAJOR}
+)
+
+install(TARGETS libipt
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+)
+
+
+function(add_ptunit_std_test name)
+ add_ptunit_c_test(${name} src/pt_${name}.c ${ARGN})
+endfunction(add_ptunit_std_test)
+
+
+add_ptunit_std_test(last_ip)
+add_ptunit_std_test(tnt_cache)
+add_ptunit_std_test(retstack)
+add_ptunit_std_test(ild)
+add_ptunit_std_test(cpu)
+add_ptunit_std_test(time)
+add_ptunit_std_test(asid)
+add_ptunit_std_test(event_queue)
+add_ptunit_std_test(image src/pt_asid.c)
+add_ptunit_std_test(sync src/pt_packet.c)
+add_ptunit_std_test(config)
+add_ptunit_std_test(image_section_cache)
+add_ptunit_std_test(block_cache)
+add_ptunit_std_test(msec_cache)
+
+add_ptunit_c_test(mapped_section src/pt_asid.c)
+add_ptunit_c_test(query
+ src/pt_encoder.c
+ src/pt_last_ip.c
+ src/pt_packet_decoder.c
+ src/pt_sync.c
+ src/pt_tnt_cache.c
+ src/pt_time.c
+ src/pt_event_queue.c
+ src/pt_query_decoder.c
+ src/pt_packet.c
+ src/pt_decoder_function.c
+ src/pt_packet_decoder.c
+ src/pt_config.c
+ src/pt_time.c
+ src/pt_block_cache.c
+)
+add_ptunit_c_test(section ${LIBIPT_SECTION_FILES})
+add_ptunit_c_test(section-file
+ test/src/ptunit-section.c
+ src/pt_section.c
+ src/pt_section_file.c
+)
+add_ptunit_c_test(packet
+ src/pt_encoder.c
+ src/pt_packet_decoder.c
+ src/pt_sync.c
+ src/pt_packet.c
+ src/pt_decoder_function.c
+ src/pt_config.c
+)
+add_ptunit_c_test(fetch
+ src/pt_decoder_function.c
+ src/pt_encoder.c
+ src/pt_config.c
+)
+
+add_ptunit_cpp_test(cpp)
+add_ptunit_libraries(cpp libipt)
diff --git a/libipt/include/intel-pt.h.in b/libipt/include/intel-pt.h.in
new file mode 100755
index 000000000000..de1c6275c806
--- /dev/null
+++ b/libipt/include/intel-pt.h.in
@@ -0,0 +1,2463 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INTEL_PT_H
+#define INTEL_PT_H
+
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Intel(R) Processor Trace (Intel PT) decoder library.
+ *
+ * This file is logically structured into the following sections:
+ *
+ * - Version
+ * - Errors
+ * - Configuration
+ * - Packet encoder / decoder
+ * - Query decoder
+ * - Traced image
+ * - Instruction flow decoder
+ * - Block decoder
+ */
+
+
+
+struct pt_encoder;
+struct pt_packet_decoder;
+struct pt_query_decoder;
+struct pt_insn_decoder;
+struct pt_block_decoder;
+
+
+
+/* A macro to mark functions as exported. */
+#ifndef pt_export
+# if defined(__GNUC__)
+# define pt_export __attribute__((visibility("default")))
+# elif defined(_MSC_VER)
+# define pt_export __declspec(dllimport)
+# else
+# error "unknown compiler"
+# endif
+#endif
+
+
+
+/* Version. */
+
+
+/** The header version. */
+#define LIBIPT_VERSION_MAJOR ${PT_VERSION_MAJOR}
+#define LIBIPT_VERSION_MINOR ${PT_VERSION_MINOR}
+
+#define LIBIPT_VERSION ((LIBIPT_VERSION_MAJOR << 8) + LIBIPT_VERSION_MINOR)
+
+
+/** The library version. */
+struct pt_version {
+ /** Major version number. */
+ uint8_t major;
+
+ /** Minor version number. */
+ uint8_t minor;
+
+ /** Reserved bits. */
+ uint16_t reserved;
+
+ /** Build number. */
+ uint32_t build;
+
+ /** Version extension. */
+ const char *ext;
+};
+
+
+/** Return the library version. */
+extern pt_export struct pt_version pt_library_version(void);
+
+
+
+/* Errors. */
+
+
+
+/** Error codes. */
+enum pt_error_code {
+ /* No error. Everything is OK. */
+ pte_ok,
+
+ /* Internal decoder error. */
+ pte_internal,
+
+ /* Invalid argument. */
+ pte_invalid,
+
+ /* Decoder out of sync. */
+ pte_nosync,
+
+ /* Unknown opcode. */
+ pte_bad_opc,
+
+ /* Unknown payload. */
+ pte_bad_packet,
+
+ /* Unexpected packet context. */
+ pte_bad_context,
+
+ /* Decoder reached end of trace stream. */
+ pte_eos,
+
+ /* No packet matching the query to be found. */
+ pte_bad_query,
+
+ /* Decoder out of memory. */
+ pte_nomem,
+
+ /* Bad configuration. */
+ pte_bad_config,
+
+ /* There is no IP. */
+ pte_noip,
+
+ /* The IP has been suppressed. */
+ pte_ip_suppressed,
+
+ /* There is no memory mapped at the requested address. */
+ pte_nomap,
+
+ /* An instruction could not be decoded. */
+ pte_bad_insn,
+
+ /* No wall-clock time is available. */
+ pte_no_time,
+
+ /* No core:bus ratio available. */
+ pte_no_cbr,
+
+ /* Bad traced image. */
+ pte_bad_image,
+
+ /* A locking error. */
+ pte_bad_lock,
+
+ /* The requested feature is not supported. */
+ pte_not_supported,
+
+ /* The return address stack is empty. */
+ pte_retstack_empty,
+
+ /* A compressed return is not indicated correctly by a taken branch. */
+ pte_bad_retcomp,
+
+ /* The current decoder state does not match the state in the trace. */
+ pte_bad_status_update,
+
+ /* The trace did not contain an expected enabled event. */
+ pte_no_enable,
+
+ /* An event was ignored. */
+ pte_event_ignored,
+
+ /* Something overflowed. */
+ pte_overflow,
+
+ /* A file handling error. */
+ pte_bad_file,
+
+ /* Unknown cpu. */
+ pte_bad_cpu
+};
+
+
+/** Decode a function return value into a pt_error_code. */
+static inline enum pt_error_code pt_errcode(int status)
+{
+ return (status >= 0) ? pte_ok : (enum pt_error_code) -status;
+}
+
+/** Return a human readable error string. */
+extern pt_export const char *pt_errstr(enum pt_error_code);
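
Editorial illustration (not part of the imported header): turning a negative libipt status code into a readable diagnostic; the report_error helper is hypothetical.

~~~{.c}
#include "intel-pt.h"

#include <stdio.h>

/* Print a diagnostic for a failed libipt call. */
static void report_error(int status)
{
	if (status >= 0)
		return;

	fprintf(stderr, "libipt error: %s\n",
		pt_errstr(pt_errcode(status)));
}
~~~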
+
+
+
+/* Configuration. */
+
+
+
+/** A cpu vendor. */
+enum pt_cpu_vendor {
+ pcv_unknown,
+ pcv_intel
+};
+
+/** A cpu identifier. */
+struct pt_cpu {
+ /** The cpu vendor. */
+ enum pt_cpu_vendor vendor;
+
+ /** The cpu family. */
+ uint16_t family;
+
+ /** The cpu model. */
+ uint8_t model;
+
+ /** The stepping. */
+ uint8_t stepping;
+};
+
+/** A collection of Intel PT errata. */
+struct pt_errata {
+ /** BDM70: Intel(R) Processor Trace PSB+ Packets May Contain
+ * Unexpected Packets.
+ *
+ * Same as: SKD024, SKL021, KBL021.
+ *
+ * Some Intel Processor Trace packets should be issued only between
+ * TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE
+ * packet is generated it may be preceded by a PSB+ that incorrectly
+ * includes FUP and MODE.Exec packets.
+ */
+ uint32_t bdm70:1;
+
+ /** BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be
+ * Recorded Following a Transactional Abort.
+ *
+ * Use of Intel(R) Transactional Synchronization Extensions (Intel(R)
+ * TSX) may result in a transactional abort. If an abort occurs
+ * immediately following a branch instruction, an incorrect branch
+ * target may be logged in an LBR (Last Branch Record) or in an Intel(R)
+ * Processor Trace (Intel(R) PT) packet before the LBR or Intel PT
+ * packet produced by the abort.
+ */
+ uint32_t bdm64:1;
+
+ /** SKD007: Intel(R) PT Buffer Overflow May Result in Incorrect Packets.
+ *
+ * Same as: SKL049, KBL041.
+ *
+ * Under complex micro-architectural conditions, an Intel PT (Processor
+ * Trace) OVF (Overflow) packet may be issued after the first byte of a
+ * multi-byte CYC (Cycle Count) packet, instead of any remaining bytes
+ * of the CYC.
+ */
+ uint32_t skd007:1;
+
+ /** SKD022: VM Entry That Clears TraceEn May Generate a FUP.
+ *
+ * Same as: SKL024, KBL023.
+ *
+ * If VM entry clears Intel(R) PT (Intel Processor Trace)
+ * IA32_RTIT_CTL.TraceEn (MSR 570H, bit 0) while PacketEn is 1 then a
+ * FUP (Flow Update Packet) will precede the TIP.PGD (Target IP Packet,
+ * Packet Generation Disable). VM entry can clear TraceEn if the
+ * VM-entry MSR-load area includes an entry for the IA32_RTIT_CTL MSR.
+ */
+ uint32_t skd022:1;
+
+ /** SKD010: Intel(R) PT FUP May be Dropped After OVF.
+ *
+ * Same as: SKD014, SKL033, KBL030.
+ *
+ * Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+ * be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+ * Packet, Packet Generation Enable).
+ */
+ uint32_t skd010:1;
+
+ /** SKL014: Intel(R) PT TIP.PGD May Not Have Target IP Payload.
+ *
+ * Same as: KBL014.
+ *
+ * When Intel PT (Intel Processor Trace) is enabled and a direct
+ * unconditional branch clears IA32_RTIT_STATUS.FilterEn (MSR 571H, bit
+ * 0), due to this erratum, the resulting TIP.PGD (Target IP Packet,
+ * Packet Generation Disable) may not have an IP payload with the target
+ * IP.
+ */
+ uint32_t skl014:1;
+
+ /** APL12: Intel(R) PT OVF May Be Followed By An Unexpected FUP Packet.
+ *
+	 * Certain Intel PT (Processor Trace) packets, including FUPs (Flow
+	 * Update Packets), should be issued only between TIP.PGE (Target IP
+	 * Packet - Packet Generation Enable) and TIP.PGD (Target IP Packet -
+ * Packet Generation Disable) packets. When outside a TIP.PGE/TIP.PGD
+ * pair, as a result of IA32_RTIT_STATUS.FilterEn[0] (MSR 571H) being
+ * cleared, an OVF (Overflow) packet may be unexpectedly followed by a
+ * FUP.
+ */
+ uint32_t apl12:1;
+
+	/** APL11: Intel(R) PT OVF Packet May Be Followed by TIP.PGD Packet.
+ *
+ * If Intel PT (Processor Trace) encounters an internal buffer overflow
+ * and generates an OVF (Overflow) packet just as IA32_RTIT_CTL (MSR
+ * 570H) bit 0 (TraceEn) is cleared, or during a far transfer that
+ * causes IA32_RTIT_STATUS.ContextEn[1] (MSR 571H) to be cleared, the
+ * OVF may be followed by a TIP.PGD (Target Instruction Pointer - Packet
+ * Generation Disable) packet.
+ */
+ uint32_t apl11:1;
+
+ /* Reserve a few bytes for the future. */
+ uint32_t reserved[15];
+};
+
+/** A collection of decoder-specific configuration flags. */
+struct pt_conf_flags {
+ /** The decoder variant. */
+ union {
+ /** Flags for the block decoder. */
+ struct {
+ /** End a block after a call instruction. */
+ uint32_t end_on_call:1;
+
+ /** Enable tick events for timing updates. */
+ uint32_t enable_tick_events:1;
+
+ /** End a block after a jump instruction. */
+ uint32_t end_on_jump:1;
+ } block;
+
+ /** Flags for the instruction flow decoder. */
+ struct {
+ /** Enable tick events for timing updates. */
+ uint32_t enable_tick_events:1;
+ } insn;
+
+ /* Reserve a few bytes for future extensions. */
+ uint32_t reserved[4];
+ } variant;
+};
+
+/** The address filter configuration. */
+struct pt_conf_addr_filter {
+ /** The address filter configuration.
+ *
+ * This corresponds to the respective fields in IA32_RTIT_CTL MSR.
+ */
+ union {
+ uint64_t addr_cfg;
+
+ struct {
+ uint32_t addr0_cfg:4;
+ uint32_t addr1_cfg:4;
+ uint32_t addr2_cfg:4;
+ uint32_t addr3_cfg:4;
+ } ctl;
+ } config;
+
+ /** The address ranges configuration.
+ *
+ * This corresponds to the IA32_RTIT_ADDRn_A/B MSRs.
+ */
+ uint64_t addr0_a;
+ uint64_t addr0_b;
+ uint64_t addr1_a;
+ uint64_t addr1_b;
+ uint64_t addr2_a;
+ uint64_t addr2_b;
+ uint64_t addr3_a;
+ uint64_t addr3_b;
+
+ /* Reserve some space. */
+ uint64_t reserved[8];
+};
+
+/** An unknown packet. */
+struct pt_packet_unknown;
+
+/** An Intel PT decoder configuration.
+ */
+struct pt_config {
+ /** The size of the config structure in bytes. */
+ size_t size;
+
+ /** The trace buffer begin address. */
+ uint8_t *begin;
+
+ /** The trace buffer end address. */
+ uint8_t *end;
+
+ /** An optional callback for handling unknown packets.
+ *
+ * If \@callback is not NULL, it is called for any unknown opcode.
+ */
+ struct {
+ /** The callback function.
+ *
+ * It shall decode the packet at \@pos into \@unknown.
+ * It shall return the number of bytes read upon success.
+ * It shall return a negative pt_error_code otherwise.
+ * The below context is passed as \@context.
+ */
+ int (*callback)(struct pt_packet_unknown *unknown,
+ const struct pt_config *config,
+ const uint8_t *pos, void *context);
+
+ /** The user-defined context for this configuration. */
+ void *context;
+ } decode;
+
+ /** The cpu on which Intel PT has been recorded. */
+ struct pt_cpu cpu;
+
+ /** The errata to apply when encoding or decoding Intel PT. */
+ struct pt_errata errata;
+
+ /* The CTC frequency.
+ *
+ * This is only required if MTC packets have been enabled in
+	 * IA32_RTIT_CTL.MTCEn.
+ */
+ uint32_t cpuid_0x15_eax, cpuid_0x15_ebx;
+
+ /* The MTC frequency as defined in IA32_RTIT_CTL.MTCFreq.
+ *
+ * This is only required if MTC packets have been enabled in
+	 * IA32_RTIT_CTL.MTCEn.
+ */
+ uint8_t mtc_freq;
+
+ /* The nominal frequency as defined in MSR_PLATFORM_INFO[15:8].
+ *
+ * This is only required if CYC packets have been enabled in
+	 * IA32_RTIT_CTL.CYCEn.
+ *
+ * If zero, timing calibration will only be able to use MTC and CYC
+ * packets.
+ *
+ * If not zero, timing calibration will also be able to use CBR
+ * packets.
+ */
+ uint8_t nom_freq;
+
+ /** A collection of decoder-specific flags. */
+ struct pt_conf_flags flags;
+
+ /** The address filter configuration. */
+ struct pt_conf_addr_filter addr_filter;
+};
+
+
+/** Zero-initialize an Intel PT configuration. */
+static inline void pt_config_init(struct pt_config *config)
+{
+ memset(config, 0, sizeof(*config));
+
+ config->size = sizeof(*config);
+}
+
+/** Determine errata for a given cpu.
+ *
+ * Updates \@errata based on \@cpu.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@errata or \@cpu is NULL.
+ * Returns -pte_bad_cpu if \@cpu is not known.
+ */
+extern pt_export int pt_cpu_errata(struct pt_errata *errata,
+ const struct pt_cpu *cpu);
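
Editorial illustration (not part of the imported header): a minimal configuration sketch, assuming the caller has already mapped a raw trace buffer and knows the cpu the trace was recorded on; the cpu values below are placeholders and setup_config is hypothetical.

~~~{.c}
#include "intel-pt.h"

/* Prepare a decoder configuration for a mapped trace buffer. */
static int setup_config(struct pt_config *config, uint8_t *buffer,
			size_t size)
{
	int errcode;

	pt_config_init(config);

	config->begin = buffer;
	config->end = buffer + size;

	/* Placeholder cpu identification; use the values of the cpu on
	 * which the trace was recorded.
	 */
	config->cpu.vendor = pcv_intel;
	config->cpu.family = 6;
	config->cpu.model = 0x5e;
	config->cpu.stepping = 3;

	/* Apply the workarounds libipt knows for this cpu. */
	errcode = pt_cpu_errata(&config->errata, &config->cpu);
	if (errcode < 0)
		return errcode;

	return 0;
}
~~~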
+
+
+
+/* Packet encoder / decoder. */
+
+
+
+/** Intel PT packet types. */
+enum pt_packet_type {
+ /* An invalid packet. */
+ ppt_invalid,
+
+ /* A packet decodable by the optional decoder callback. */
+ ppt_unknown,
+
+ /* Actual packets supported by this library. */
+ ppt_pad,
+ ppt_psb,
+ ppt_psbend,
+ ppt_fup,
+ ppt_tip,
+ ppt_tip_pge,
+ ppt_tip_pgd,
+ ppt_tnt_8,
+ ppt_tnt_64,
+ ppt_mode,
+ ppt_pip,
+ ppt_vmcs,
+ ppt_cbr,
+ ppt_tsc,
+ ppt_tma,
+ ppt_mtc,
+ ppt_cyc,
+ ppt_stop,
+ ppt_ovf,
+ ppt_mnt,
+ ppt_exstop,
+ ppt_mwait,
+ ppt_pwre,
+ ppt_pwrx,
+ ppt_ptw
+};
+
+/** The IP compression. */
+enum pt_ip_compression {
+ /* The bits encode the payload size and the encoding scheme.
+ *
+ * No payload. The IP has been suppressed.
+ */
+ pt_ipc_suppressed = 0x0,
+
+ /* Payload: 16 bits. Update last IP. */
+ pt_ipc_update_16 = 0x01,
+
+ /* Payload: 32 bits. Update last IP. */
+ pt_ipc_update_32 = 0x02,
+
+ /* Payload: 48 bits. Sign extend to full address. */
+ pt_ipc_sext_48 = 0x03,
+
+ /* Payload: 48 bits. Update last IP. */
+ pt_ipc_update_48 = 0x04,
+
+ /* Payload: 64 bits. Full address. */
+ pt_ipc_full = 0x06
+};
+
+/** An execution mode. */
+enum pt_exec_mode {
+ ptem_unknown,
+ ptem_16bit,
+ ptem_32bit,
+ ptem_64bit
+};
+
+/** Mode packet leaves. */
+enum pt_mode_leaf {
+ pt_mol_exec = 0x00,
+ pt_mol_tsx = 0x20
+};
+
+/** A TNT-8 or TNT-64 packet. */
+struct pt_packet_tnt {
+ /** TNT payload bit size. */
+ uint8_t bit_size;
+
+ /** TNT payload excluding stop bit. */
+ uint64_t payload;
+};
+
+/** A packet with IP payload. */
+struct pt_packet_ip {
+ /** IP compression. */
+ enum pt_ip_compression ipc;
+
+ /** Zero-extended payload ip. */
+ uint64_t ip;
+};
+
+/** A mode.exec packet. */
+struct pt_packet_mode_exec {
+ /** The mode.exec csl bit. */
+ uint32_t csl:1;
+
+ /** The mode.exec csd bit. */
+ uint32_t csd:1;
+};
+
+static inline enum pt_exec_mode
+pt_get_exec_mode(const struct pt_packet_mode_exec *packet)
+{
+ if (packet->csl)
+ return packet->csd ? ptem_unknown : ptem_64bit;
+ else
+ return packet->csd ? ptem_32bit : ptem_16bit;
+}
+
+static inline struct pt_packet_mode_exec
+pt_set_exec_mode(enum pt_exec_mode mode)
+{
+ struct pt_packet_mode_exec packet;
+
+ switch (mode) {
+ default:
+ packet.csl = 1;
+ packet.csd = 1;
+ break;
+
+ case ptem_64bit:
+ packet.csl = 1;
+ packet.csd = 0;
+ break;
+
+ case ptem_32bit:
+ packet.csl = 0;
+ packet.csd = 1;
+ break;
+
+ case ptem_16bit:
+ packet.csl = 0;
+ packet.csd = 0;
+ break;
+ }
+
+ return packet;
+}
+
+/** A mode.tsx packet. */
+struct pt_packet_mode_tsx {
+ /** The mode.tsx intx bit. */
+ uint32_t intx:1;
+
+ /** The mode.tsx abrt bit. */
+ uint32_t abrt:1;
+};
+
+/** A mode packet. */
+struct pt_packet_mode {
+ /** Mode leaf. */
+ enum pt_mode_leaf leaf;
+
+ /** Mode bits. */
+ union {
+ /** Packet: mode.exec. */
+ struct pt_packet_mode_exec exec;
+
+ /** Packet: mode.tsx. */
+ struct pt_packet_mode_tsx tsx;
+ } bits;
+};
+
+/** A PIP packet. */
+struct pt_packet_pip {
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The non-root bit. */
+ uint32_t nr:1;
+};
+
+/** A TSC packet. */
+struct pt_packet_tsc {
+ /** The TSC value. */
+ uint64_t tsc;
+};
+
+/** A CBR packet. */
+struct pt_packet_cbr {
+ /** The core/bus cycle ratio. */
+ uint8_t ratio;
+};
+
+/** A TMA packet. */
+struct pt_packet_tma {
+ /** The crystal clock tick counter value. */
+ uint16_t ctc;
+
+ /** The fast counter value. */
+ uint16_t fc;
+};
+
+/** A MTC packet. */
+struct pt_packet_mtc {
+ /** The crystal clock tick counter value. */
+ uint8_t ctc;
+};
+
+/** A CYC packet. */
+struct pt_packet_cyc {
+ /** The cycle counter value. */
+ uint64_t value;
+};
+
+/** A VMCS packet. */
+struct pt_packet_vmcs {
+ /* The VMCS Base Address (i.e. the shifted payload). */
+ uint64_t base;
+};
+
+/** A MNT packet. */
+struct pt_packet_mnt {
+ /** The raw payload. */
+ uint64_t payload;
+};
+
+/** An EXSTOP packet. */
+struct pt_packet_exstop {
+ /** A flag specifying the binding of the packet:
+ *
+ * set: binds to the next FUP.
+ * clear: standalone.
+ */
+ uint32_t ip:1;
+};
+
+/** A MWAIT packet. */
+struct pt_packet_mwait {
+ /** The MWAIT hints (EAX). */
+ uint32_t hints;
+
+ /** The MWAIT extensions (ECX). */
+ uint32_t ext;
+};
+
+/** A PWRE packet. */
+struct pt_packet_pwre {
+ /** The resolved thread C-state. */
+ uint8_t state;
+
+ /** The resolved thread sub C-state. */
+ uint8_t sub_state;
+
+ /** A flag indicating whether the C-state entry was initiated by h/w. */
+ uint32_t hw:1;
+};
+
+/** A PWRX packet. */
+struct pt_packet_pwrx {
+ /** The core C-state at the time of the wake. */
+ uint8_t last;
+
+ /** The deepest core C-state achieved during sleep. */
+ uint8_t deepest;
+
+ /** The wake reason:
+ *
+ * - due to external interrupt received.
+ */
+ uint32_t interrupt:1;
+
+ /** - due to store to monitored address. */
+ uint32_t store:1;
+
+ /** - due to h/w autonomous condition such as HDC. */
+ uint32_t autonomous:1;
+};
+
+/** A PTW packet. */
+struct pt_packet_ptw {
+ /** The raw payload. */
+ uint64_t payload;
+
+ /** The payload size as encoded in the packet. */
+ uint8_t plc;
+
+ /** A flag saying whether a FUP is following PTW that provides
+ * the IP of the corresponding PTWRITE instruction.
+ */
+ uint32_t ip:1;
+};
+
+static inline int pt_ptw_size(uint8_t plc)
+{
+ switch (plc) {
+ case 0:
+ return 4;
+
+ case 1:
+ return 8;
+
+ case 2:
+ case 3:
+ return -pte_bad_packet;
+ }
+
+ return -pte_internal;
+}
+
+/** An unknown packet decodable by the optional decoder callback. */
+struct pt_packet_unknown {
+ /** Pointer to the raw packet bytes. */
+ const uint8_t *packet;
+
+ /** Optional pointer to a user-defined structure. */
+ void *priv;
+};
+
+/** An Intel PT packet. */
+struct pt_packet {
+ /** The type of the packet.
+ *
+ * This also determines the \@payload field.
+ */
+ enum pt_packet_type type;
+
+ /** The size of the packet including opcode and payload. */
+ uint8_t size;
+
+ /** Packet specific data. */
+ union {
+ /** Packets: pad, ovf, psb, psbend, stop - no payload. */
+
+ /** Packet: tnt-8, tnt-64. */
+ struct pt_packet_tnt tnt;
+
+ /** Packet: tip, fup, tip.pge, tip.pgd. */
+ struct pt_packet_ip ip;
+
+ /** Packet: mode. */
+ struct pt_packet_mode mode;
+
+ /** Packet: pip. */
+ struct pt_packet_pip pip;
+
+ /** Packet: tsc. */
+ struct pt_packet_tsc tsc;
+
+ /** Packet: cbr. */
+ struct pt_packet_cbr cbr;
+
+ /** Packet: tma. */
+ struct pt_packet_tma tma;
+
+ /** Packet: mtc. */
+ struct pt_packet_mtc mtc;
+
+ /** Packet: cyc. */
+ struct pt_packet_cyc cyc;
+
+ /** Packet: vmcs. */
+ struct pt_packet_vmcs vmcs;
+
+ /** Packet: mnt. */
+ struct pt_packet_mnt mnt;
+
+ /** Packet: exstop. */
+ struct pt_packet_exstop exstop;
+
+ /** Packet: mwait. */
+ struct pt_packet_mwait mwait;
+
+ /** Packet: pwre. */
+ struct pt_packet_pwre pwre;
+
+ /** Packet: pwrx. */
+ struct pt_packet_pwrx pwrx;
+
+ /** Packet: ptw. */
+ struct pt_packet_ptw ptw;
+
+ /** Packet: unknown. */
+ struct pt_packet_unknown unknown;
+ } payload;
+};
+
+
+
+/* Packet encoder. */
+
+
+
+/** Allocate an Intel PT packet encoder.
+ *
+ * The encoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the encoder.
+ *
+ * The encoder starts at the beginning of the trace buffer.
+ */
+extern pt_export struct pt_encoder *
+pt_alloc_encoder(const struct pt_config *config);
+
+/** Free an Intel PT packet encoder.
+ *
+ * The \@encoder must not be used after a successful return.
+ */
+extern pt_export void pt_free_encoder(struct pt_encoder *encoder);
+
+/** Hard set synchronization point of an Intel PT packet encoder.
+ *
+ * Synchronize \@encoder to \@offset within the trace buffer.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if the given offset is behind the end of the trace buffer.
+ * Returns -pte_invalid if \@encoder is NULL.
+ */
+extern pt_export int pt_enc_sync_set(struct pt_encoder *encoder,
+ uint64_t offset);
+
+/** Get the current packet encoder position.
+ *
+ * Fills the current \@encoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@encoder or \@offset is NULL.
+ */
+extern pt_export int pt_enc_get_offset(const struct pt_encoder *encoder,
+ uint64_t *offset);
+
+/* Return a pointer to \@encoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@encoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_enc_get_config(const struct pt_encoder *encoder);
+
+/** Encode an Intel PT packet.
+ *
+ * Writes \@packet at \@encoder's current position in the Intel PT buffer and
+ * advances the \@encoder beyond the written packet.
+ *
+ * The \@packet.size field is ignored.
+ *
+ * In case of errors, the \@encoder is not advanced and nothing is written
+ * into the Intel PT buffer.
+ *
+ * Returns the number of bytes written on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_bad_opc if \@packet.type is not known.
+ * Returns -pte_bad_packet if \@packet's payload is invalid.
+ * Returns -pte_eos if \@encoder reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@encoder or \@packet is NULL.
+ */
+extern pt_export int pt_enc_next(struct pt_encoder *encoder,
+ const struct pt_packet *packet);
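
Editorial illustration (not part of the imported header): encoding a single PSB packet into the configured buffer; encode_psb is hypothetical and the configuration is assumed to be set up as sketched above.

~~~{.c}
#include "intel-pt.h"

/* Write one PSB packet at the start of the configured buffer. */
static int encode_psb(const struct pt_config *config)
{
	struct pt_encoder *encoder;
	struct pt_packet packet;
	int bytes;

	encoder = pt_alloc_encoder(config);
	if (!encoder)
		return -pte_nomem;

	/* PSB carries no payload and the size field is ignored. */
	packet.type = ppt_psb;

	bytes = pt_enc_next(encoder, &packet);

	pt_free_encoder(encoder);

	return bytes;
}
~~~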
+
+
+
+/* Packet decoder. */
+
+
+
+/** Allocate an Intel PT packet decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_packet_decoder *
+pt_pkt_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT packet decoder.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_pkt_free_decoder(struct pt_packet_decoder *decoder);
+
+/** Synchronize an Intel PT packet decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_pkt_sync_forward(struct pt_packet_decoder *decoder);
+extern pt_export int pt_pkt_sync_backward(struct pt_packet_decoder *decoder);
+
+/** Hard set synchronization point of an Intel PT decoder.
+ *
+ * Synchronize \@decoder to \@offset within the trace buffer.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if the given offset is behind the end of the trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_pkt_sync_set(struct pt_packet_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_pkt_get_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * This is useful when splitting a trace stream for parallel decoding.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_pkt_get_sync_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset);
+
+/* Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_pkt_get_config(const struct pt_packet_decoder *decoder);
+
+/** Decode the next packet and advance the decoder.
+ *
+ * Decodes the packet at \@decoder's current position into \@packet and
+ * adjusts the \@decoder's position by the number of bytes the packet had
+ * consumed.
+ *
+ * The \@size argument must be set to sizeof(struct pt_packet).
+ *
+ * Returns the number of bytes consumed on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_bad_opc if the packet is unknown.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@decoder reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@packet is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_pkt_next(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet, size_t size);
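
Editorial illustration (not part of the imported header): the basic packet-decode loop, synchronizing onto the first PSB and iterating until the end of the buffer; count_packets is hypothetical.

~~~{.c}
#include "intel-pt.h"

/* Count the packets following the first synchronization point. */
static int count_packets(const struct pt_config *config)
{
	struct pt_packet_decoder *decoder;
	int count, errcode;

	decoder = pt_pkt_alloc_decoder(config);
	if (!decoder)
		return -pte_nomem;

	count = 0;

	errcode = pt_pkt_sync_forward(decoder);
	while (errcode >= 0) {
		struct pt_packet packet;

		errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
		if (errcode >= 0)
			count += 1;
	}

	pt_pkt_free_decoder(decoder);

	/* Reaching the end of the buffer is the expected way out. */
	return (errcode == -pte_eos) ? count : errcode;
}
~~~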
+
+
+
+/* Query decoder. */
+
+
+
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+
+/** Event types. */
+enum pt_event_type {
+ /* Tracing has been enabled/disabled. */
+ ptev_enabled,
+ ptev_disabled,
+
+ /* Tracing has been disabled asynchronously. */
+ ptev_async_disabled,
+
+ /* An asynchronous branch, e.g. interrupt. */
+ ptev_async_branch,
+
+ /* A synchronous paging event. */
+ ptev_paging,
+
+ /* An asynchronous paging event. */
+ ptev_async_paging,
+
+ /* Trace overflow. */
+ ptev_overflow,
+
+ /* An execution mode change. */
+ ptev_exec_mode,
+
+ /* A transactional execution state change. */
+ ptev_tsx,
+
+ /* Trace Stop. */
+ ptev_stop,
+
+ /* A synchronous vmcs event. */
+ ptev_vmcs,
+
+ /* An asynchronous vmcs event. */
+ ptev_async_vmcs,
+
+ /* Execution has stopped. */
+ ptev_exstop,
+
+ /* An MWAIT operation completed. */
+ ptev_mwait,
+
+ /* A power state was entered. */
+ ptev_pwre,
+
+ /* A power state was exited. */
+ ptev_pwrx,
+
+ /* A PTWRITE event. */
+ ptev_ptwrite,
+
+ /* A timing event. */
+ ptev_tick,
+
+ /* A core:bus ratio event. */
+ ptev_cbr,
+
+ /* A maintenance event. */
+ ptev_mnt
+};
+
+/** An event. */
+struct pt_event {
+ /** The type of the event. */
+ enum pt_event_type type;
+
+ /** A flag indicating that the event IP has been suppressed. */
+ uint32_t ip_suppressed:1;
+
+ /** A flag indicating that the event is for status update. */
+ uint32_t status_update:1;
+
+ /** A flag indicating that the event has timing information. */
+ uint32_t has_tsc:1;
+
+ /** The time stamp count of the event.
+ *
+ * This field is only valid if \@has_tsc is set.
+ */
+ uint64_t tsc;
+
+ /** The number of lost mtc and cyc packets.
+ *
+ * This gives an idea about the quality of the \@tsc. The more packets
+	 * were dropped, the less precise the timing is.
+ */
+ uint32_t lost_mtc;
+ uint32_t lost_cyc;
+
+ /* Reserved space for future extensions. */
+ uint64_t reserved[2];
+
+ /** Event specific data. */
+ union {
+ /** Event: enabled. */
+ struct {
+ /** The address at which tracing resumes. */
+ uint64_t ip;
+
+ /** A flag indicating that tracing resumes from the IP
+ * at which tracing had been disabled before.
+ */
+ uint32_t resumed:1;
+ } enabled;
+
+ /** Event: disabled. */
+ struct {
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /* The exact source ip needs to be determined using
+ * disassembly and the filter configuration.
+ */
+ } disabled;
+
+ /** Event: async disabled. */
+ struct {
+ /** The source address of the asynchronous branch that
+ * disabled tracing.
+ */
+ uint64_t at;
+
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } async_disabled;
+
+ /** Event: async branch. */
+ struct {
+ /** The branch source address. */
+ uint64_t from;
+
+ /** The branch destination address.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t to;
+ } async_branch;
+
+ /** Event: paging. */
+ struct {
+ /** The updated CR3 value.
+ *
+			 * The lower 5 bits have been zeroed out.
+ * The upper bits have been zeroed out depending on the
+ * maximum possible address.
+ */
+ uint64_t cr3;
+
+ /** A flag indicating whether the cpu is operating in
+ * vmx non-root (guest) mode.
+ */
+ uint32_t non_root:1;
+
+ /* The address at which the event is effective is
+ * obvious from the disassembly.
+ */
+ } paging;
+
+ /** Event: async paging. */
+ struct {
+ /** The updated CR3 value.
+ *
+			 * The lower 5 bits have been zeroed out.
+ * The upper bits have been zeroed out depending on the
+ * maximum possible address.
+ */
+ uint64_t cr3;
+
+ /** A flag indicating whether the cpu is operating in
+ * vmx non-root (guest) mode.
+ */
+ uint32_t non_root:1;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+ } async_paging;
+
+ /** Event: overflow. */
+ struct {
+ /** The address at which tracing resumes after overflow.
+ *
+			 * This field is not valid if \@ip_suppressed is set.
+ * In this case, the overflow resolved while tracing
+ * was disabled.
+ */
+ uint64_t ip;
+ } overflow;
+
+ /** Event: exec mode. */
+ struct {
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+ } exec_mode;
+
+ /** Event: tsx. */
+ struct {
+ /** The address at which the event is effective.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /** A flag indicating speculative execution mode. */
+ uint32_t speculative:1;
+
+ /** A flag indicating speculative execution aborts. */
+ uint32_t aborted:1;
+ } tsx;
+
+ /** Event: vmcs. */
+ struct {
+ /** The VMCS base address.
+ *
+ * The address is zero-extended with the lower 12 bits
+ * all zero.
+ */
+ uint64_t base;
+
+ /* The new VMCS base address should be stored and
+ * applied on subsequent VM entries.
+ */
+ } vmcs;
+
+ /** Event: async vmcs. */
+ struct {
+ /** The VMCS base address.
+ *
+ * The address is zero-extended with the lower 12 bits
+ * all zero.
+ */
+ uint64_t base;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+
+ /* An async paging event that binds to the same IP
+ * will always succeed this async vmcs event.
+ */
+ } async_vmcs;
+
+ /** Event: execution stopped. */
+ struct {
+ /** The address at which execution has stopped. This is
+ * the last instruction that did not complete.
+ *
+			 * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } exstop;
+
+ /** Event: mwait. */
+ struct {
+ /** The address of the instruction causing the mwait.
+ *
+			 * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /** The mwait hints (eax).
+ *
+ * Reserved bits are undefined.
+ */
+ uint32_t hints;
+
+ /** The mwait extensions (ecx).
+ *
+ * Reserved bits are undefined.
+ */
+ uint32_t ext;
+ } mwait;
+
+ /** Event: power state entry. */
+ struct {
+ /** The resolved thread C-state. */
+ uint8_t state;
+
+ /** The resolved thread sub C-state. */
+ uint8_t sub_state;
+
+ /** A flag indicating whether the C-state entry was
+ * initiated by h/w.
+ */
+ uint32_t hw:1;
+ } pwre;
+
+ /** Event: power state exit. */
+ struct {
+ /** The core C-state at the time of the wake. */
+ uint8_t last;
+
+ /** The deepest core C-state achieved during sleep. */
+ uint8_t deepest;
+
+ /** The wake reason:
+ *
+ * - due to external interrupt received.
+ */
+ uint32_t interrupt:1;
+
+ /** - due to store to monitored address. */
+ uint32_t store:1;
+
+ /** - due to h/w autonomous condition such as HDC. */
+ uint32_t autonomous:1;
+ } pwrx;
+
+ /** Event: ptwrite. */
+ struct {
+ /** The address of the ptwrite instruction.
+ *
+			 * This field is not valid if \@ip_suppressed is set.
+ *
+ * In this case, the address is obvious from the
+ * disassembly.
+ */
+ uint64_t ip;
+
+ /** The size of the below \@payload in bytes. */
+ uint8_t size;
+
+ /** The ptwrite payload. */
+ uint64_t payload;
+ } ptwrite;
+
+ /** Event: tick. */
+ struct {
+			/** The instruction address near which the tick occurred.
+ *
+ * A timestamp can sometimes be attributed directly to
+ * an instruction (e.g. to an indirect branch that
+ * receives CYC + TIP) and sometimes not (e.g. MTC).
+ *
+			 * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } tick;
+
+ /** Event: cbr. */
+ struct {
+ /** The core:bus ratio. */
+ uint16_t ratio;
+ } cbr;
+
+ /** Event: mnt. */
+ struct {
+ /** The raw payload. */
+ uint64_t payload;
+ } mnt;
+ } variant;
+};
+
+
+/** Allocate an Intel PT query decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_query_decoder *
+pt_qry_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT query decoder.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_qry_free_decoder(struct pt_query_decoder *decoder);
+
+/** Synchronize an Intel PT query decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * If \@ip is not NULL, set it to the last ip.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_qry_sync_forward(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+extern pt_export int pt_qry_sync_backward(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+
+/** Manually synchronize an Intel PT query decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * If \@ip is not NULL, set it to the last ip.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_qry_sync_set(struct pt_query_decoder *decoder,
+ uint64_t *ip, uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_get_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * This is useful for splitting a trace stream for parallel decoding.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset);
+
+/* Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_qry_get_config(const struct pt_query_decoder *decoder);
+
+/** Query whether the next conditional branch has been taken.
+ *
+ * On success, provides 1 (taken) or 0 (not taken) in \@taken for the next
+ * conditional branch and updates \@decoder.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no conditional branch is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@taken is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_cond_branch(struct pt_query_decoder *decoder,
+ int *taken);
+
+/** Get the next indirect branch destination.
+ *
+ * On success, provides the linear destination address of the next indirect
+ * branch in \@ip and updates \@decoder.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no indirect branch is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@ip is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_indirect_branch(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+
+/** Query the next pending event.
+ *
+ * On success, provides the next event \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no event is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_event(struct pt_query_decoder *decoder,
+ struct pt_event *event, size_t size);
+
+/** Query the current time.
+ *
+ * On success, provides the time at the last query in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_qry_time(struct pt_query_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder,
+ uint32_t *cbr);
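+
+/* Example (illustrative sketch, not part of the API): a minimal query-decoder
+ * loop that synchronizes onto the trace and drains pending events. It assumes
+ * a struct pt_config named config that has been initialized and points at
+ * captured trace; error handling is abbreviated. The loop stops as soon as
+ * something other than an event is required, i.e. when the caller would have
+ * to issue pt_qry_cond_branch() or pt_qry_indirect_branch() queries while
+ * following the flow in its memory image.
+ *
+ *   struct pt_query_decoder *qry;
+ *   uint64_t sync_ip;
+ *   int status;
+ *
+ *   qry = pt_qry_alloc_decoder(&config);
+ *   if (!qry)
+ *           return;
+ *
+ *   status = pt_qry_sync_forward(qry, &sync_ip);
+ *   while (status >= 0) {
+ *           struct pt_event event;
+ *
+ *           status = pt_qry_event(qry, &event, sizeof(event));
+ *   }
+ *
+ *   pt_qry_free_decoder(qry);
+ */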
+
+
+
+/* Traced image. */
+
+
+
+/** An Intel PT address space identifier.
+ *
+ * This identifies a particular address space when adding file sections or
+ * when reading memory.
+ */
+struct pt_asid {
+ /** The size of this object - set to sizeof(struct pt_asid). */
+ size_t size;
+
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The VMCS Base address. */
+ uint64_t vmcs;
+};
+
+/** An unknown CR3 value to be used for pt_asid objects. */
+static const uint64_t pt_asid_no_cr3 = 0xffffffffffffffffull;
+
+/** An unknown VMCS Base value to be used for pt_asid objects. */
+static const uint64_t pt_asid_no_vmcs = 0xffffffffffffffffull;
+
+/** Initialize an address space identifier. */
+static inline void pt_asid_init(struct pt_asid *asid)
+{
+ asid->size = sizeof(*asid);
+ asid->cr3 = pt_asid_no_cr3;
+ asid->vmcs = pt_asid_no_vmcs;
+}
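+
+/* Example (illustrative sketch): an asid identifying a single process by its
+ * CR3 value, leaving the VMCS base unknown; the CR3 value is a placeholder.
+ *
+ *   struct pt_asid asid;
+ *
+ *   pt_asid_init(&asid);
+ *   asid.cr3 = 0x12345000ull;
+ */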
+
+
+/** A cache of traced image sections. */
+struct pt_image_section_cache;
+
+/** Allocate a traced memory image section cache.
+ *
+ * An optional \@name may be given to the cache. The name string is copied.
+ *
+ * Returns a new traced memory image section cache on success, NULL otherwise.
+ */
+extern pt_export struct pt_image_section_cache *
+pt_iscache_alloc(const char *name);
+
+/** Free a traced memory image section cache.
+ *
+ * The \@iscache must have been allocated with pt_iscache_alloc().
+ * The \@iscache must not be used after a successful return.
+ */
+extern pt_export void pt_iscache_free(struct pt_image_section_cache *iscache);
+
+/** Set the image section cache limit.
+ *
+ * Set the limit for a section cache in bytes. A non-zero limit will keep the
+ * least recently used sections mapped until the limit is reached. A limit of
+ * zero disables caching.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_invalid if \@iscache is NULL.
+ */
+extern pt_export int
+pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit);
+
+/** Get the image section cache name.
+ *
+ * Returns a pointer to \@iscache's name or NULL if there is no name.
+ */
+extern pt_export const char *
+pt_iscache_name(const struct pt_image_section_cache *iscache);
+
+/** Add a new file section to the traced memory image section cache.
+ *
+ * Adds a new section consisting of \@size bytes starting at \@offset in
+ * \@filename loaded at the virtual address \@vaddr if \@iscache does not
+ * already contain such a section.
+ *
+ * Returns an image section identifier (isid) uniquely identifying that section
+ * in \@iscache.
+ *
+ * The section is silently truncated to match the size of \@filename.
+ *
+ * Returns a positive isid on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@iscache or \@filename is NULL.
+ * Returns -pte_invalid if \@offset is too big.
+ */
+extern pt_export int pt_iscache_add_file(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t vaddr);
+
+/** Read memory from a cached file section.
+ *
+ * Reads \@size bytes of memory starting at virtual address \@vaddr in the
+ * section identified by \@isid in \@iscache into \@buffer.
+ *
+ * The caller is responsible for allocating a \@buffer of at least \@size bytes.
+ *
+ * The read request may be truncated if it crosses section boundaries or if
+ * \@size is too big. We support reading at least 4Kbyte in one chunk
+ * unless the read would cross a section boundary.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@iscache or \@buffer is NULL.
+ * Returns -pte_invalid if \@size is zero.
+ * Returns -pte_nomap if \@vaddr is not contained in section \@isid.
+ * Returns -pte_bad_image if \@iscache does not contain \@isid.
+ */
+extern pt_export int pt_iscache_read(struct pt_image_section_cache *iscache,
+ uint8_t *buffer, uint64_t size, int isid,
+ uint64_t vaddr);
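+
+/* Example (illustrative sketch): cache a file section and read from it. The
+ * file name, offsets, and load address are placeholders; error handling is
+ * abbreviated.
+ *
+ *   struct pt_image_section_cache *iscache;
+ *   uint8_t buffer[16];
+ *   int isid, bytes;
+ *
+ *   iscache = pt_iscache_alloc("example");
+ *   if (!iscache)
+ *           return;
+ *
+ *   isid = pt_iscache_add_file(iscache, "a.out", 0x1000ull, 0x2000ull,
+ *                              0x400000ull);
+ *   if (isid > 0)
+ *           bytes = pt_iscache_read(iscache, buffer, sizeof(buffer), isid,
+ *                                   0x400010ull);
+ *
+ *   pt_iscache_free(iscache);
+ */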
+
+/** The traced memory image. */
+struct pt_image;
+
+
+/** Allocate a traced memory image.
+ *
+ * An optional \@name may be given to the image. The name string is copied.
+ *
+ * Returns a new traced memory image on success, NULL otherwise.
+ */
+extern pt_export struct pt_image *pt_image_alloc(const char *name);
+
+/** Free a traced memory image.
+ *
+ * The \@image must have been allocated with pt_image_alloc().
+ * The \@image must not be used after a successful return.
+ */
+extern pt_export void pt_image_free(struct pt_image *image);
+
+/** Get the image name.
+ *
+ * Returns a pointer to \@image's name or NULL if there is no name.
+ */
+extern pt_export const char *pt_image_name(const struct pt_image *image);
+
+/** Add a new file section to the traced memory image.
+ *
+ * Adds \@size bytes starting at \@offset in \@filename. The section is
+ * loaded at the virtual address \@vaddr in the address space \@asid.
+ *
+ * The \@asid may be NULL or (partially) invalid. In that case only the valid
+ * fields are considered when comparing with other address-spaces. Use this
+ * when tracing a single process or when adding sections to all processes.
+ *
+ * The section is silently truncated to match the size of \@filename.
+ *
+ * Existing sections that would overlap with the new section will be shrunk
+ * or split.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@filename is NULL.
+ * Returns -pte_invalid if \@offset is too big.
+ */
+extern pt_export int pt_image_add_file(struct pt_image *image,
+ const char *filename, uint64_t offset,
+ uint64_t size,
+ const struct pt_asid *asid,
+ uint64_t vaddr);
+
+/** Add a section from an image section cache.
+ *
+ * Add the section from \@iscache identified by \@isid in address space \@asid.
+ *
+ * Existing sections that would overlap with the new section will be shrunk
+ * or split.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@image or \@iscache is NULL.
+ * Returns -pte_bad_image if \@iscache does not contain \@isid.
+ */
+extern pt_export int pt_image_add_cached(struct pt_image *image,
+ struct pt_image_section_cache *iscache,
+ int isid, const struct pt_asid *asid);
+
+/** Copy an image.
+ *
+ * Adds all sections from \@src to \@image. Sections that could not be added
+ * will be ignored.
+ *
+ * Returns the number of ignored sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@src is NULL.
+ */
+extern pt_export int pt_image_copy(struct pt_image *image,
+ const struct pt_image *src);
+
+/** Remove all sections loaded from a file.
+ *
+ * Removes all sections loaded from \@filename from the address space \@asid.
+ * Specify the same \@asid that was used for adding sections from \@filename.
+ *
+ * Returns the number of removed sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@filename is NULL.
+ */
+extern pt_export int pt_image_remove_by_filename(struct pt_image *image,
+ const char *filename,
+ const struct pt_asid *asid);
+
+/** Remove all sections loaded into an address space.
+ *
+ * Removes all sections loaded into \@asid. Specify the same \@asid that was
+ * used for adding sections.
+ *
+ * Returns the number of removed sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image is NULL.
+ */
+extern pt_export int pt_image_remove_by_asid(struct pt_image *image,
+ const struct pt_asid *asid);
+
+/** A read memory callback function.
+ *
+ * It shall read \@size bytes of memory from address space \@asid starting
+ * at \@ip into \@buffer.
+ *
+ * It shall return the number of bytes read on success.
+ * It shall return a negative pt_error_code otherwise.
+ */
+typedef int (read_memory_callback_t)(uint8_t *buffer, size_t size,
+ const struct pt_asid *asid,
+ uint64_t ip, void *context);
+
+/** Set the memory callback for the traced memory image.
+ *
+ * Sets \@callback for reading memory. The callback is used for addresses
+ * that are not found in file sections. The \@context argument is passed
+ * to \@callback on each use.
+ *
+ * There can only be one callback at any time. A subsequent call will replace
+ * the previous callback. If \@callback is NULL, the callback is removed.
+ *
+ * Returns -pte_invalid if \@image is NULL.
+ */
+extern pt_export int pt_image_set_callback(struct pt_image *image,
+ read_memory_callback_t *callback,
+ void *context);
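+
+/* Example (illustrative sketch): populate a traced memory image for a single
+ * process. The file name, addresses, and CR3 value are placeholders; error
+ * handling is abbreviated. A decoder would then be pointed at the image via
+ * pt_insn_set_image() or pt_blk_set_image().
+ *
+ *   struct pt_image *image;
+ *   struct pt_asid asid;
+ *   int errcode;
+ *
+ *   pt_asid_init(&asid);
+ *   asid.cr3 = 0x12345000ull;
+ *
+ *   image = pt_image_alloc("example");
+ *   if (!image)
+ *           return;
+ *
+ *   errcode = pt_image_add_file(image, "a.out", 0x1000ull, 0x2000ull,
+ *                               &asid, 0x400000ull);
+ *   if (errcode < 0)
+ *           return;
+ *
+ *   pt_image_free(image);
+ */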
+
+
+
+/* Instruction flow decoder. */
+
+
+
+/** The instruction class.
+ *
+ * We provide only a very coarse classification suitable for reconstructing
+ * the execution flow.
+ */
+enum pt_insn_class {
+ /* The instruction could not be classified. */
+ ptic_error,
+
+ /* The instruction is something not listed below. */
+ ptic_other,
+
+ /* The instruction is a near (function) call. */
+ ptic_call,
+
+ /* The instruction is a near (function) return. */
+ ptic_return,
+
+ /* The instruction is a near unconditional jump. */
+ ptic_jump,
+
+ /* The instruction is a near conditional jump. */
+ ptic_cond_jump,
+
+ /* The instruction is a call-like far transfer.
+ * E.g. SYSCALL, SYSENTER, or FAR CALL.
+ */
+ ptic_far_call,
+
+ /* The instruction is a return-like far transfer.
+ * E.g. SYSRET, SYSEXIT, IRET, or FAR RET.
+ */
+ ptic_far_return,
+
+ /* The instruction is a jump-like far transfer.
+ * E.g. FAR JMP.
+ */
+ ptic_far_jump,
+
+ /* The instruction is a PTWRITE. */
+ ptic_ptwrite
+};
+
+/** The maximal size of an instruction. */
+enum {
+ pt_max_insn_size = 15
+};
+
+/** A single traced instruction. */
+struct pt_insn {
+ /** The virtual address in its process. */
+ uint64_t ip;
+
+ /** The image section identifier for the section containing this
+ * instruction.
+ *
+ * A value of zero means that the section did not have an identifier.
+ * The section was not added via an image section cache or the memory
+ * was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** A coarse classification. */
+ enum pt_insn_class iclass;
+
+ /** The raw bytes. */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size in bytes. */
+ uint8_t size;
+
+ /** A collection of flags giving additional information:
+ *
+ * - the instruction was executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - this instruction is truncated in its image section.
+ *
+ * It starts in the image section identified by \@isid and continues
+ * in one or more other sections.
+ */
+ uint32_t truncated:1;
+};
+
+
+/** Allocate an Intel PT instruction flow decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_insn_decoder *
+pt_insn_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT instruction flow decoder.
+ *
+ * This will destroy the decoder's default image.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_insn_free_decoder(struct pt_insn_decoder *decoder);
+
+/** Synchronize an Intel PT instruction flow decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_insn_sync_forward(struct pt_insn_decoder *decoder);
+extern pt_export int pt_insn_sync_backward(struct pt_insn_decoder *decoder);
+
+/** Manually synchronize an Intel PT instruction flow decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_insn_sync_set(struct pt_insn_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_insn_get_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the traced image.
+ *
+ * The returned image may be modified as long as no decoder that uses this
+ * image is running.
+ *
+ * Returns a pointer to the traced image the decoder uses for reading memory.
+ * Returns NULL if \@decoder is NULL.
+ */
+extern pt_export struct pt_image *
+pt_insn_get_image(struct pt_insn_decoder *decoder);
+
+/** Set the traced image.
+ *
+ * Sets the image that \@decoder uses for reading memory to \@image. If \@image
+ * is NULL, sets the image to \@decoder's default image.
+ *
+ * Only one image can be active at any time.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_insn_set_image(struct pt_insn_decoder *decoder,
+ struct pt_image *image);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_insn_get_config(const struct pt_insn_decoder *decoder);
+
+/** Return the current time.
+ *
+ * On success, provides the time at the last preceding timing packet in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_insn_time(struct pt_insn_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder,
+ uint32_t *cbr);
+
+/** Return the current address space identifier.
+ *
+ * On success, provides the current address space identifier in \@asid.
+ *
+ * The \@size argument must be set to sizeof(struct pt_asid). At most \@size
+ * bytes will be copied and \@asid->size will be set to the actual size of the
+ * provided address space identifier.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@asid is NULL.
+ */
+extern pt_export int pt_insn_asid(const struct pt_insn_decoder *decoder,
+ struct pt_asid *asid, size_t size);
+
+/** Determine the next instruction.
+ *
+ * On success, provides the next instruction in execution order in \@insn.
+ *
+ * The \@size argument must be set to sizeof(struct pt_insn).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns pts_eos to indicate the end of the trace stream. Subsequent calls
+ * to pt_insn_next() will continue to return pts_eos until trace is required
+ * to determine the next instruction.
+ *
+ * Returns -pte_bad_context if the decoder encountered an unexpected packet.
+ * Returns -pte_bad_opc if the decoder encountered unknown packets.
+ * Returns -pte_bad_packet if the decoder encountered unknown packet payloads.
+ * Returns -pte_bad_query if the decoder got out of sync.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@insn is NULL.
+ * Returns -pte_nomap if the memory at the instruction address can't be read.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_insn_next(struct pt_insn_decoder *decoder,
+ struct pt_insn *insn, size_t size);
+
+/** Get the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_query if there is no event.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ */
+extern pt_export int pt_insn_event(struct pt_insn_decoder *decoder,
+ struct pt_event *event, size_t size);
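+
+/* Example (illustrative sketch): a minimal instruction flow decode loop. It
+ * assumes a struct pt_config named config that has been initialized and
+ * points at captured trace, and a struct pt_image named image populated with
+ * the traced binaries; error handling is abbreviated. Pending events are
+ * drained whenever pts_event_pending is indicated before asking for the next
+ * instruction.
+ *
+ *   struct pt_insn_decoder *decoder;
+ *   int status;
+ *
+ *   decoder = pt_insn_alloc_decoder(&config);
+ *   if (!decoder)
+ *           return;
+ *
+ *   (void) pt_insn_set_image(decoder, image);
+ *
+ *   status = pt_insn_sync_forward(decoder);
+ *   while (status >= 0) {
+ *           struct pt_insn insn;
+ *
+ *           while (status & pts_event_pending) {
+ *                   struct pt_event event;
+ *
+ *                   status = pt_insn_event(decoder, &event, sizeof(event));
+ *                   if (status < 0)
+ *                           break;
+ *           }
+ *           if (status < 0)
+ *                   break;
+ *
+ *           status = pt_insn_next(decoder, &insn, sizeof(insn));
+ *   }
+ *
+ *   pt_insn_free_decoder(decoder);
+ */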
+
+
+
+/* Block decoder. */
+
+
+
+/** A block of instructions.
+ *
+ * Instructions in this block are executed sequentially but are not necessarily
+ * contiguous in memory. Users are expected to follow direct branches.
+ */
+struct pt_block {
+ /** The IP of the first instruction in this block. */
+ uint64_t ip;
+
+ /** The IP of the last instruction in this block.
+ *
+ * This can be used for error-detection.
+ */
+ uint64_t end_ip;
+
+ /** The image section identifier for the section containing the
+ * instructions in this block.
+ *
+ * A value of zero means that the section did not have an identifier.
+ * The section was not added via an image section cache or the memory
+ * was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode for all instructions in this block. */
+ enum pt_exec_mode mode;
+
+ /** The instruction class for the last instruction in this block.
+ *
+ * This field may be set to ptic_error to indicate that the instruction
+ * class is not available. The block decoder may choose to not provide
+ * the instruction class in some cases for performance reasons.
+ */
+ enum pt_insn_class iclass;
+
+ /** The number of instructions in this block. */
+ uint16_t ninsn;
+
+ /** The raw bytes of the last instruction in this block in case the
+ * instruction does not fit entirely into this block's section.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size of the last instruction in this block in bytes.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t size;
+
+ /** A collection of flags giving additional information about the
+ * instructions in this block.
+ *
+ * - all instructions in this block were executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - the last instruction in this block is truncated.
+ *
+ * It starts in this block's section but continues in one or more
+ * other sections depending on how fragmented the memory image is.
+ *
+ * The raw bytes for the last instruction are provided in \@raw and
+ * its size in \@size in this case.
+ */
+ uint32_t truncated:1;
+};
+
+/** Allocate an Intel PT block decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_block_decoder *
+pt_blk_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT block decoder.
+ *
+ * This will destroy the decoder's default image.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_blk_free_decoder(struct pt_block_decoder *decoder);
+
+/** Synchronize an Intel PT block decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_blk_sync_forward(struct pt_block_decoder *decoder);
+extern pt_export int pt_blk_sync_backward(struct pt_block_decoder *decoder);
+
+/** Manually synchronize an Intel PT block decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_blk_sync_set(struct pt_block_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_blk_get_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_blk_get_sync_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the traced image.
+ *
+ * The returned image may be modified as long as \@decoder is not running.
+ *
+ * Returns a pointer to the traced image \@decoder uses for reading memory.
+ * Returns NULL if \@decoder is NULL.
+ */
+extern pt_export struct pt_image *
+pt_blk_get_image(struct pt_block_decoder *decoder);
+
+/** Set the traced image.
+ *
+ * Sets the image that \@decoder uses for reading memory to \@image. If \@image
+ * is NULL, sets the image to \@decoder's default image.
+ *
+ * Only one image can be active at any time.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_blk_set_image(struct pt_block_decoder *decoder,
+ struct pt_image *image);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_blk_get_config(const struct pt_block_decoder *decoder);
+
+/** Return the current time.
+ *
+ * On success, provides the time at the last preceding timing packet in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_blk_time(struct pt_block_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder,
+ uint32_t *cbr);
+
+/** Return the current address space identifier.
+ *
+ * On success, provides the current address space identifier in \@asid.
+ *
+ * The \@size argument must be set to sizeof(struct pt_asid). At most \@size
+ * bytes will be copied and \@asid->size will be set to the actual size of the
+ * provided address space identifier.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@asid is NULL.
+ */
+extern pt_export int pt_blk_asid(const struct pt_block_decoder *decoder,
+ struct pt_asid *asid, size_t size);
+
+/** Determine the next block of instructions.
+ *
+ * On success, provides the next block of instructions in execution order in
+ * \@block.
+ *
+ * The \@size argument must be set to sizeof(struct pt_block).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns pts_eos to indicate the end of the trace stream. Subsequent calls
+ * to pt_blk_next() will continue to return pts_eos until trace is required
+ * to determine the next instruction.
+ *
+ * Returns -pte_bad_context if the decoder encountered an unexpected packet.
+ * Returns -pte_bad_opc if the decoder encountered unknown packets.
+ * Returns -pte_bad_packet if the decoder encountered unknown packet payloads.
+ * Returns -pte_bad_query if the decoder got out of sync.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@block is NULL.
+ * Returns -pte_nomap if the memory at the instruction address can't be read.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_blk_next(struct pt_block_decoder *decoder,
+ struct pt_block *block, size_t size);
+
+/** Get the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_query if there is no event.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ */
+extern pt_export int pt_blk_event(struct pt_block_decoder *decoder,
+ struct pt_event *event, size_t size);
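+
+/* Example (illustrative sketch): a minimal block decode loop, following the
+ * same event-draining pattern as the instruction flow example above. It
+ * assumes an initialized struct pt_config named config and a populated
+ * struct pt_image named image; error handling is abbreviated.
+ *
+ *   struct pt_block_decoder *decoder;
+ *   int status;
+ *
+ *   decoder = pt_blk_alloc_decoder(&config);
+ *   if (!decoder)
+ *           return;
+ *
+ *   (void) pt_blk_set_image(decoder, image);
+ *
+ *   status = pt_blk_sync_forward(decoder);
+ *   while (status >= 0) {
+ *           struct pt_block block;
+ *
+ *           while (status & pts_event_pending) {
+ *                   struct pt_event event;
+ *
+ *                   status = pt_blk_event(decoder, &event, sizeof(event));
+ *                   if (status < 0)
+ *                           break;
+ *           }
+ *           if (status < 0)
+ *                   break;
+ *
+ *           status = pt_blk_next(decoder, &block, sizeof(block));
+ *   }
+ *
+ *   pt_blk_free_decoder(decoder);
+ */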
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* INTEL_PT_H */
diff --git a/libipt/internal/include/posix/pt_section_posix.h b/libipt/internal/include/posix/pt_section_posix.h
new file mode 100644
index 000000000000..99e85a834a3b
--- /dev/null
+++ b/libipt/internal/include/posix/pt_section_posix.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_POSIX_H
+#define PT_SECTION_POSIX_H
+
+#include <stdint.h>
+#include <sys/stat.h>
+
+struct pt_section;
+
+
+/* Fstat-based file status. */
+struct pt_sec_posix_status {
+ /* The file status. */
+ struct stat stat;
+};
+
+/* MMAP-based section mapping information. */
+struct pt_sec_posix_mapping {
+ /* The mmap base address. */
+ uint8_t *base;
+
+ /* The mapped memory size. */
+ uint64_t size;
+
+ /* The begin and end of the mapped memory. */
+ const uint8_t *begin, *end;
+};
+
+
+/* Map a section.
+ *
+ * On success, sets @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_invalid if @section can't be mapped.
+ */
+extern int pt_sec_posix_map(struct pt_section *section, int fd);
+
+/* Unmap a section.
+ *
+ * On success, clears @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_posix_unmap(struct pt_section *section);
+
+/* Read memory from an mmaped section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_invalid if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_sec_posix_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+/* Compute the memory size of a section.
+ *
+ * On success, provides the amount of memory used for mapping @section in bytes
+ * in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @size is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_posix_memsize(const struct pt_section *section,
+ uint64_t *size);
+
+#endif /* PT_SECTION_POSIX_H */
diff --git a/libipt/internal/include/pt_asid.h b/libipt/internal/include/pt_asid.h
new file mode 100644
index 000000000000..cded0c1092d3
--- /dev/null
+++ b/libipt/internal/include/pt_asid.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_ASID_H
+#define PT_ASID_H
+
+#include <stddef.h>
+
+struct pt_asid;
+
+
+/* Read an asid provided by our user.
+ *
+ * Translate a user-provided asid in @user into @asid. This uses default values
+ * for fields that are not provided by the user and for all fields, if @user is
+ * NULL.
+ *
+ * Fields set in @user that are not known (i.e. from a newer version of this
+ * library) will be ignored.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @asid is NULL.
+ */
+extern int pt_asid_from_user(struct pt_asid *asid, const struct pt_asid *user);
+
+/* Provide an asid to the user.
+ *
+ * Translate @asid into a potentially older or newer version in @user.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @user or @asid is NULL.
+ * Returns -pte_invalid if @size is too small.
+ */
+extern int pt_asid_to_user(struct pt_asid *user, const struct pt_asid *asid,
+ size_t size);
+
+/* Match two asids.
+ *
+ * Asids match if all fields provide either default values or are identical.
+ *
+ * Returns a positive number if @lhs matches @rhs.
+ * Returns zero if @lhs does not match @rhs.
+ * Returns a negative error code otherwise.
+ *
+ * Returns -pte_internal if @lhs or @rhs are NULL.
+ */
+extern int pt_asid_match(const struct pt_asid *lhs, const struct pt_asid *rhs);
+
+#endif /* PT_ASID_H */
diff --git a/libipt/internal/include/pt_block_cache.h b/libipt/internal/include/pt_block_cache.h
new file mode 100644
index 000000000000..552fd93a7bb6
--- /dev/null
+++ b/libipt/internal/include/pt_block_cache.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_BLOCK_CACHE_H
+#define PT_BLOCK_CACHE_H
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* A block cache entry qualifier.
+ *
+ * This describes what to do at the decision point determined by a block cache
+ * entry.
+ */
+enum pt_bcache_qualifier {
+ /* This is not a decision point.
+ *
+ * The next decision point is too far away and one or more fields
+ * threatened to overflow so we had to stop somewhere on our way.
+ *
+ * Apply the displacement and number of instructions and continue from
+ * the resulting IP.
+ */
+ ptbq_again,
+
+ /* The decision point is a conditional branch.
+ *
+ * This requires a conditional branch query.
+ *
+ * The isize field should provide the size of the branch instruction so
+ * only taken branches require the instruction to be decoded.
+ */
+ ptbq_cond,
+
+ /* The decision point is a near indirect call.
+ *
+ * This requires a return-address stack update and an indirect branch
+ * query.
+ *
+ * The isize field should provide the size of the call instruction so
+ * the return address can be computed by adding it to the displacement
+ * that brings us to the call instruction.
+ *
+ * No instruction decode is required.
+ */
+ ptbq_ind_call,
+
+ /* The decision point is a near return.
+ *
+ * The return may be compressed so this requires a conditional branch
+ * query to determine the compression state and either a return-address
+ * stack lookup or an indirect branch query.
+ *
+ * No instruction decode is required.
+ */
+ ptbq_return,
+
+ /* The decision point is an indirect jump or far branch.
+ *
+ * This requires an indirect branch query.
+ *
+ * No instruction decode is required.
+ */
+ ptbq_indirect,
+
+ /* The decision point requires the instruction at the decision point IP
+ * to be decoded to determine the next step.
+ *
+ * This is used for
+ *
+ * - near direct calls that need to maintain the return-address stack.
+ *
+ * - near direct jumps that are too far away to be handled with a
+ * block cache entry as they would overflow the displacement field.
+ */
+ ptbq_decode
+};
+
+/* A block cache entry.
+ *
+ * There will be one such entry per byte of decoded memory image. Each entry
+ * corresponds to an IP in the traced memory image. The cache is initialized
+ * with invalid entries for all IPs.
+ *
+ * Only entries for the first byte of each instruction will be used; other
+ * entries are ignored and will remain invalid.
+ *
+ * Each valid entry gives the distance from the entry's IP to the next decision
+ * point both in bytes and in the number of instructions.
+ */
+struct pt_bcache_entry {
+ /* The displacement to the next decision point in bytes.
+ *
+ * This is zero if we are at a decision point except for ptbq_again
+ * where it gives the displacement to the next block cache entry to be
+ * used.
+ */
+ int32_t displacement:16;
+
+ /* The number of instructions to the next decision point.
+ *
+ * This is typically one at a decision point since we are already
+ * accounting for the instruction at the decision point.
+ *
+ * Note that this field must be smaller than the respective struct
+ * pt_block field so we can fit one block cache entry into an empty
+ * block.
+ */
+ uint32_t ninsn:8;
+
+ /* The execution mode for all instructions between here and the next
+ * decision point.
+ *
+ * This is enum pt_exec_mode.
+ *
+ * This is ptem_unknown if the entry is not valid.
+ */
+ uint32_t mode:2;
+
+ /* The decision point qualifier.
+ *
+ * This is enum pt_bcache_qualifier.
+ */
+ uint32_t qualifier:3;
+
+ /* The size of the instruction at the decision point.
+ *
+ * This is zero if the size is too big to fit into the field. In this
+ * case, the instruction needs to be decoded to determine its size.
+ */
+ uint32_t isize:3;
+};
+
+/* Get the execution mode of a block cache entry. */
+static inline enum pt_exec_mode pt_bce_exec_mode(struct pt_bcache_entry bce)
+{
+ return (enum pt_exec_mode) bce.mode;
+}
+
+/* Get the block cache qualifier of a block cache entry. */
+static inline enum pt_bcache_qualifier
+pt_bce_qualifier(struct pt_bcache_entry bce)
+{
+ return (enum pt_bcache_qualifier) bce.qualifier;
+}
+
+/* Check if a block cache entry is valid. */
+static inline int pt_bce_is_valid(struct pt_bcache_entry bce)
+{
+ return pt_bce_exec_mode(bce) != ptem_unknown;
+}
+
+
+
+/* A block cache. */
+struct pt_block_cache {
+ /* The number of cache entries. */
+ uint32_t nentries;
+
+ /* A variable-length array of @nentries entries. */
+ struct pt_bcache_entry entry[];
+};
+
+/* Create a block cache.
+ *
+ * @nentries is the number of entries in the cache and should match the size of
+ * the to-be-cached section in bytes.
+ */
+extern struct pt_block_cache *pt_bcache_alloc(uint64_t nentries);
+
+/* Destroy a block cache. */
+extern void pt_bcache_free(struct pt_block_cache *bcache);
+
+/* Cache a block.
+ *
+ * It is expected that all calls for the same @index write the same @bce.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @bcache is NULL.
+ * Returns -pte_internal if @index is outside of @bcache.
+ */
+extern int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index,
+ struct pt_bcache_entry bce);
+
+/* Lookup a cached block.
+ *
+ * The returned cache entry need not be valid. The caller is expected to check
+ * for validity using pt_bce_is_valid(*@bce).
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @bcache or @bce is NULL.
+ * Returns -pte_internal if @index is outside of @bcache.
+ */
+extern int pt_bcache_lookup(struct pt_bcache_entry *bce,
+ const struct pt_block_cache *bcache,
+ uint64_t index);
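+
+/* Example (illustrative sketch): allocate a cache, add one entry, and look it
+ * up again. The cache size, index, and entry values are placeholders; a real
+ * caller derives them from the decoded section and instruction.
+ *
+ *   struct pt_block_cache *bcache;
+ *   struct pt_bcache_entry bce = { 0 };
+ *   int errcode;
+ *
+ *   bcache = pt_bcache_alloc(0x1000ull);
+ *   if (!bcache)
+ *           return;
+ *
+ *   bce.ninsn = 1;
+ *   bce.mode = ptem_64bit;
+ *   bce.qualifier = ptbq_decode;
+ *
+ *   errcode = pt_bcache_add(bcache, 0x10ull, bce);
+ *   if (!errcode)
+ *           errcode = pt_bcache_lookup(&bce, bcache, 0x10ull);
+ *
+ *   pt_bcache_free(bcache);
+ */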
+
+#endif /* PT_BLOCK_CACHE_H */
diff --git a/libipt/internal/include/pt_block_decoder.h b/libipt/internal/include/pt_block_decoder.h
new file mode 100644
index 000000000000..b965be1fc5fd
--- /dev/null
+++ b/libipt/internal/include/pt_block_decoder.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_BLOCK_DECODER_H
+#define PT_BLOCK_DECODER_H
+
+#include "pt_query_decoder.h"
+#include "pt_image.h"
+#include "pt_retstack.h"
+#include "pt_ild.h"
+#include "pt_msec_cache.h"
+
+
+/* A block decoder.
+ *
+ * It decodes Intel(R) Processor Trace into a sequence of instruction blocks
+ * such that the instructions in each block can be decoded without further need
+ * of trace.
+ */
+struct pt_block_decoder {
+ /* The Intel(R) Processor Trace query decoder. */
+ struct pt_query_decoder query;
+
+ /* The configuration flags.
+ *
+ * Those are our flags set by the user. In @query.config.flags, we set
+ * the flags we need for the query decoder.
+ */
+ struct pt_conf_flags flags;
+
+ /* The default image. */
+ struct pt_image default_image;
+
+ /* The image. */
+ struct pt_image *image;
+
+ /* The current cached section. */
+ struct pt_msec_cache scache;
+
+ /* The current address space. */
+ struct pt_asid asid;
+
+ /* The current Intel(R) Processor Trace event. */
+ struct pt_event event;
+
+ /* The call/return stack for ret compression. */
+ struct pt_retstack retstack;
+
+ /* The current instruction.
+ *
+ * This is only valid if @process_insn is set.
+ */
+ struct pt_insn insn;
+ struct pt_insn_ext iext;
+
+ /* The start IP of the next block.
+ *
+ * If tracing is disabled, this is the IP at which we assume tracing to
+ * be resumed.
+ */
+ uint64_t ip;
+
+ /* The current execution mode. */
+ enum pt_exec_mode mode;
+
+ /* The status of the last successful decoder query.
+ *
+ * Errors are reported directly; the status is always a non-negative
+ * pt_status_flag bit-vector.
+ */
+ int status;
+
+ /* A collection of flags defining how to proceed with flow reconstruction:
+ *
+ * - tracing is enabled.
+ */
+ uint32_t enabled:1;
+
+ /* - process @event. */
+ uint32_t process_event:1;
+
+ /* - instructions are executed speculatively. */
+ uint32_t speculative:1;
+
+ /* - process @insn/@iext.
+ *
+ * We have started processing events binding to @insn/@iext. The
+ * instruction has been accounted for in the previous block, but we
+ * have not yet proceeded past it.
+ *
+ * We will do so in pt_blk_event() after processing all events that
+ * bind to it.
+ */
+ uint32_t process_insn:1;
+
+ /* - a paging event has already been bound to @insn/@iext. */
+ uint32_t bound_paging:1;
+
+ /* - a vmcs event has already been bound to @insn/@iext. */
+ uint32_t bound_vmcs:1;
+
+ /* - a ptwrite event has already been bound to @insn/@iext. */
+ uint32_t bound_ptwrite:1;
+};
+
+
+/* Initialize a block decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @decoder or @config is NULL.
+ */
+extern int pt_blk_decoder_init(struct pt_block_decoder *decoder,
+ const struct pt_config *config);
+
+/* Finalize a block decoder. */
+extern void pt_blk_decoder_fini(struct pt_block_decoder *decoder);
+
+#endif /* PT_BLOCK_DECODER_H */
diff --git a/libipt/internal/include/pt_config.h b/libipt/internal/include/pt_config.h
new file mode 100644
index 000000000000..406130efb5d7
--- /dev/null
+++ b/libipt/internal/include/pt_config.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "intel-pt.h"
+
+
+/* Read the configuration provided by a library user and zero-initialize
+ * missing fields.
+ *
+ * We keep the user's size value if it is smaller than sizeof(*@config) to
+ * allow decoders to detect missing configuration bits.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @config is NULL.
+ * Returns -pte_invalid if @uconfig is NULL.
+ * Returns -pte_bad_config if @config is too small to be useful.
+ */
+extern int pt_config_from_user(struct pt_config *config,
+ const struct pt_config *uconfig);
+
+/* Get the configuration for the n'th address filter.
+ *
+ * Returns zero if @filter is NULL or @n is out of bounds.
+ *
+ * This corresponds to IA32_RTIT_CTL.ADDRn_CFG.
+ */
+extern uint32_t pt_filter_addr_cfg(const struct pt_conf_addr_filter *filter,
+ uint8_t n);
+
+/* Get the lower bound (inclusive) of the n'th address filter.
+ *
+ * Returns zero if @filter is NULL or @n is out of bounds.
+ *
+ * This corresponds to IA32_RTIT_ADDRn_A.
+ */
+extern uint64_t pt_filter_addr_a(const struct pt_conf_addr_filter *filter,
+ uint8_t n);
+
+/* Get the upper bound (inclusive) of the n'th address filter.
+ *
+ * Returns zero if @filter is NULL or @n is out of bounds.
+ *
+ * This corresponds to IA32_RTIT_ADDRn_B.
+ */
+extern uint64_t pt_filter_addr_b(const struct pt_conf_addr_filter *filter,
+ uint8_t n);
+
+/* Check address filters.
+ *
+ * Checks @addr against @filter.
+ *
+ * Returns a positive number if @addr lies in a tracing-enabled region.
+ * Returns zero if @addr lies in a tracing-disabled region.
+ * Returns a negative pt_error_code otherwise.
+ */
+extern int pt_filter_addr_check(const struct pt_conf_addr_filter *filter,
+ uint64_t addr);
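+
+/* Example (illustrative sketch): check whether an address would have been
+ * traced under a given filter configuration. It assumes filter points at the
+ * address filter part of the decoder's configuration; the address is a
+ * placeholder. A positive status means the address lies in a tracing-enabled
+ * region.
+ *
+ *   int status;
+ *
+ *   status = pt_filter_addr_check(filter, 0x400010ull);
+ */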
diff --git a/libipt/internal/include/pt_cpu.h b/libipt/internal/include/pt_cpu.h
new file mode 100644
index 000000000000..3ab40446083f
--- /dev/null
+++ b/libipt/internal/include/pt_cpu.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_CPU_H
+#define PT_CPU_H
+
+struct pt_cpu;
+
+/* Parses @s, which should be of the format family/model[/stepping], and
+ * stores the value in @cpu on success.
+ * The optional stepping defaults to 0 if omitted.
+ *
+ * Returns 0 on success.
+ * Returns -pte_invalid if @cpu or @s is NULL.
+ * Returns -pte_invalid if @s could not be parsed.
+ */
+extern int pt_cpu_parse(struct pt_cpu *cpu, const char *s);
+
+/* Get the cpu we're running on.
+ *
+ * Reads the family/model/stepping of the processor on which this function
+ * is executed and stores the value in @cpu.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if @cpu is NULL.
+ */
+extern int pt_cpu_read(struct pt_cpu *cpu);
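+
+/* Example (illustrative sketch): identify the processor a trace was captured
+ * on, either from a family/model/stepping string or, as a fallback, from the
+ * processor this code runs on; the string is a placeholder.
+ *
+ *   struct pt_cpu cpu;
+ *   int errcode;
+ *
+ *   errcode = pt_cpu_parse(&cpu, "6/92/1");
+ *   if (errcode < 0)
+ *           errcode = pt_cpu_read(&cpu);
+ */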
+
+#endif /* PT_CPU_H */
diff --git a/libipt/internal/include/pt_cpuid.h b/libipt/internal/include/pt_cpuid.h
new file mode 100644
index 000000000000..e5afabf9079b
--- /dev/null
+++ b/libipt/internal/include/pt_cpuid.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_CPUID_H
+#define PT_CPUID_H
+
+#include <inttypes.h>
+
+/* Execute cpuid with @leaf set in the eax register.
+ * The result is stored in @eax, @ebx, @ecx and @edx.
+ */
+extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+
+#endif /* PT_CPUID_H */
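
A usage sketch (not from the library sources): query leaf 0, which on x86 returns the maximal supported leaf in eax and the vendor string in ebx/edx/ecx:

#include "pt_cpuid.h"

#include <stdio.h>

static void example_cpuid(void)
{
        uint32_t eax, ebx, ecx, edx;

        /* Leaf 0: eax holds the maximal supported leaf on return. */
        pt_cpuid(0u, &eax, &ebx, &ecx, &edx);

        printf("max cpuid leaf: %" PRIu32 "\n", eax);
}
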
diff --git a/libipt/internal/include/pt_decoder_function.h b/libipt/internal/include/pt_decoder_function.h
new file mode 100644
index 000000000000..9bed3f29f720
--- /dev/null
+++ b/libipt/internal/include/pt_decoder_function.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_DECODER_FUNCTION_H
+#define PT_DECODER_FUNCTION_H
+
+#include <stdint.h>
+
+struct pt_query_decoder;
+struct pt_packet_decoder;
+struct pt_packet;
+struct pt_config;
+
+
+/* Intel(R) Processor Trace decoder function flags. */
+enum pt_decoder_function_flag {
+ /* The decoded packet contains an unconditional branch destination. */
+ pdff_tip = 1 << 0,
+
+ /* The decoded packet contains conditional branch outcomes. */
+ pdff_tnt = 1 << 1,
+
+ /* The decoded packet contains an event. */
+ pdff_event = 1 << 2,
+
+ /* The decoded packet marks the end of a PSB header. */
+ pdff_psbend = 1 << 3,
+
+ /* The decoded packet contains a non-branch IP update. */
+ pdff_fup = 1 << 4,
+
+ /* The decoded packet is unknown to the decoder. */
+ pdff_unknown = 1 << 5,
+
+ /* The decoded packet contains timing information. */
+ pdff_timing = 1 << 6,
+
+ /* The decoded packet contains padding. */
+ pdff_pad = 1 << 7
+};
+
+/* An Intel(R) Processor Trace decoder function. */
+struct pt_decoder_function {
+ /* The function to analyze the next packet. */
+ int (*packet)(struct pt_packet_decoder *, struct pt_packet *);
+
+ /* The function to decode the next packet. */
+ int (*decode)(struct pt_query_decoder *);
+
+ /* The function to decode the next packet in segment header
+ * context, i.e. between PSB and PSBEND.
+ */
+ int (*header)(struct pt_query_decoder *);
+
+ /* Decoder function flags. */
+ int flags;
+};
+
+
+/* Fetch the decoder function.
+ *
+ * Sets @dfun to the decoder function for decoding the packet at @pos.
+ *
+ * Returns 0 on success.
+ * Returns -pte_internal if @dfun or @config is NULL.
+ * Returns -pte_nosync if @pos is NULL or outside @config's trace buffer.
+ * Returns -pte_eos if the opcode is incomplete or missing.
+ */
+extern int pt_df_fetch(const struct pt_decoder_function **dfun,
+ const uint8_t *pos, const struct pt_config *config);
+
+
+/* Decoder functions for the various packet types.
+ *
+ * Do not call those functions directly!
+ */
+extern const struct pt_decoder_function pt_decode_unknown;
+extern const struct pt_decoder_function pt_decode_pad;
+extern const struct pt_decoder_function pt_decode_psb;
+extern const struct pt_decoder_function pt_decode_tip;
+extern const struct pt_decoder_function pt_decode_tnt_8;
+extern const struct pt_decoder_function pt_decode_tnt_64;
+extern const struct pt_decoder_function pt_decode_tip_pge;
+extern const struct pt_decoder_function pt_decode_tip_pgd;
+extern const struct pt_decoder_function pt_decode_fup;
+extern const struct pt_decoder_function pt_decode_pip;
+extern const struct pt_decoder_function pt_decode_ovf;
+extern const struct pt_decoder_function pt_decode_mode;
+extern const struct pt_decoder_function pt_decode_psbend;
+extern const struct pt_decoder_function pt_decode_tsc;
+extern const struct pt_decoder_function pt_decode_cbr;
+extern const struct pt_decoder_function pt_decode_tma;
+extern const struct pt_decoder_function pt_decode_mtc;
+extern const struct pt_decoder_function pt_decode_cyc;
+extern const struct pt_decoder_function pt_decode_stop;
+extern const struct pt_decoder_function pt_decode_vmcs;
+extern const struct pt_decoder_function pt_decode_mnt;
+extern const struct pt_decoder_function pt_decode_exstop;
+extern const struct pt_decoder_function pt_decode_mwait;
+extern const struct pt_decoder_function pt_decode_pwre;
+extern const struct pt_decoder_function pt_decode_pwrx;
+extern const struct pt_decoder_function pt_decode_ptw;
+
+#endif /* PT_DECODER_FUNCTION_H */
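
A sketch (not from the library sources) of the fetch-then-dispatch use of pt_df_fetch(); @pos and @config are assumed to describe a synchronized position in a trace buffer:

#include "pt_decoder_function.h"

#include "intel-pt.h"

static int example_fetch(const uint8_t *pos, const struct pt_config *config)
{
        const struct pt_decoder_function *dfun;
        int errcode;

        errcode = pt_df_fetch(&dfun, pos, config);
        if (errcode < 0)
                return errcode;

        /* Report whether the packet at @pos binds an event. */
        return (dfun->flags & pdff_event) != 0;
}
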
diff --git a/libipt/internal/include/pt_encoder.h b/libipt/internal/include/pt_encoder.h
new file mode 100644
index 000000000000..9d48a34a863d
--- /dev/null
+++ b/libipt/internal/include/pt_encoder.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_ENCODER_H
+#define PT_ENCODER_H
+
+#include "intel-pt.h"
+
+
+/* An Intel PT packet encoder. */
+struct pt_encoder {
+ /* The encoder configuration. */
+ struct pt_config config;
+
+ /* The current position in the trace buffer. */
+ uint8_t *pos;
+};
+
+
+/* Initialize the packet encoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_encoder_init(struct pt_encoder *, const struct pt_config *);
+
+/* Finalize the packet encoder. */
+extern void pt_encoder_fini(struct pt_encoder *);
+
+
+/* The functions below are convenience wrappers around pt_enc_next(). */
+
+/* Encode a Padding (pad) packet. */
+extern int pt_encode_pad(struct pt_encoder *);
+
+/* Encode a Packet Stream Boundary (psb) packet. */
+extern int pt_encode_psb(struct pt_encoder *);
+
+/* Encode an End PSB (psbend) packet. */
+extern int pt_encode_psbend(struct pt_encoder *);
+
+/* Encode a Target Instruction Pointer (tip) packet. */
+extern int pt_encode_tip(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Taken Not Taken (tnt) packet - 8-bit version. */
+extern int pt_encode_tnt_8(struct pt_encoder *, uint8_t tnt, int size);
+
+/* Encode a Taken Not Taken (tnt) packet - 64-bit version. */
+extern int pt_encode_tnt_64(struct pt_encoder *, uint64_t tnt, int size);
+
+/* Encode a Packet Generation Enable (tip.pge) packet. */
+extern int pt_encode_tip_pge(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Packet Generation Disable (tip.pgd) packet. */
+extern int pt_encode_tip_pgd(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Flow Update Packet (fup). */
+extern int pt_encode_fup(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Paging Information Packet (pip). */
+extern int pt_encode_pip(struct pt_encoder *, uint64_t cr3, uint8_t flags);
+
+/* Encode an Overflow Packet (ovf). */
+extern int pt_encode_ovf(struct pt_encoder *);
+
+/* Encode a Mode Exec Packet (mode.exec). */
+extern int pt_encode_mode_exec(struct pt_encoder *, enum pt_exec_mode);
+
+/* Encode a Mode Tsx Packet (mode.tsx). */
+extern int pt_encode_mode_tsx(struct pt_encoder *, uint8_t);
+
+/* Encode a Time Stamp Counter (tsc) packet. */
+extern int pt_encode_tsc(struct pt_encoder *, uint64_t);
+
+/* Encode a Core Bus Ratio (cbr) packet. */
+extern int pt_encode_cbr(struct pt_encoder *, uint8_t);
+
+/* Encode a TSC/MTC Alignment (tma) packet. */
+extern int pt_encode_tma(struct pt_encoder *, uint16_t ctc,
+ uint16_t fc);
+
+/* Encode a Mini Time Counter (mtc) packet. */
+extern int pt_encode_mtc(struct pt_encoder *, uint8_t ctc);
+
+/* Encode a Cycle Count (cyc) packet. */
+extern int pt_encode_cyc(struct pt_encoder *, uint32_t cyc);
+
+/* Encode a TraceStop Packet (stop). */
+extern int pt_encode_stop(struct pt_encoder *);
+
+/* Encode a VMCS packet. */
+extern int pt_encode_vmcs(struct pt_encoder *, uint64_t);
+
+/* Encode a Maintenance (mnt) packet. */
+extern int pt_encode_mnt(struct pt_encoder *, uint64_t);
+
+#endif /* PT_ENCODER_H */
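
A sketch (not from the library sources) that uses the wrappers above to write a tiny synthetic trace; @config->begin/end are assumed to describe a writable buffer, and pt_ipc_sext_48 is the IP compression constant from intel-pt.h:

#include "pt_encoder.h"

static int example_encode(const struct pt_config *config)
{
        struct pt_encoder encoder;
        int errcode;

        errcode = pt_encoder_init(&encoder, config);
        if (errcode < 0)
                return errcode;

        /* PSB+ header, enable tracing at 0x1000, three taken/not-taken
         * results, then disable tracing at 0x2000.
         */
        errcode = pt_encode_psb(&encoder);
        if (errcode >= 0)
                errcode = pt_encode_psbend(&encoder);
        if (errcode >= 0)
                errcode = pt_encode_tip_pge(&encoder, 0x1000ull, pt_ipc_sext_48);
        if (errcode >= 0)
                errcode = pt_encode_tnt_8(&encoder, 0x4, 3);
        if (errcode >= 0)
                errcode = pt_encode_tip_pgd(&encoder, 0x2000ull, pt_ipc_sext_48);

        pt_encoder_fini(&encoder);
        return errcode;
}
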
diff --git a/libipt/internal/include/pt_event_queue.h b/libipt/internal/include/pt_event_queue.h
new file mode 100644
index 000000000000..c606dfa39752
--- /dev/null
+++ b/libipt/internal/include/pt_event_queue.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_EVENT_QUEUE_H
+#define PT_EVENT_QUEUE_H
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* Events are grouped by the packet the event binds to. */
+enum pt_event_binding {
+ evb_psbend,
+ evb_tip,
+ evb_fup,
+
+ evb_max
+};
+
+enum {
+ /* The maximal number of pending events - should be a power of two. */
+ evq_max = 8
+};
+
+/* A queue of events. */
+struct pt_event_queue {
+ /* A collection of event queues, one per binding. */
+ struct pt_event queue[evb_max][evq_max];
+
+ /* The begin and end indices for the above event queues. */
+ uint8_t begin[evb_max];
+ uint8_t end[evb_max];
+
+ /* A standalone event to be published immediately. */
+ struct pt_event standalone;
+};
+
+
+/* Initialize (or reset) an event queue. */
+extern void pt_evq_init(struct pt_event_queue *);
+
+/* Get a standalone event.
+ *
+ * Returns a pointer to the standalone event on success.
+ * Returns NULL if @evq is NULL.
+ */
+extern struct pt_event *pt_evq_standalone(struct pt_event_queue *evq);
+
+/* Enqueue an event.
+ *
+ * Adds a new event to @evq for binding @evb.
+ *
+ * Returns a pointer to the new event on success.
+ * Returns NULL if @evq is NULL or @evb is invalid.
+ * Returns NULL if @evq is full.
+ */
+extern struct pt_event *pt_evq_enqueue(struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+
+/* Dequeue an event.
+ *
+ * Removes the first event for binding @evb from @evq.
+ *
+ * Returns a pointer to the dequeued event on success.
+ * Returns NULL if @evq is NULL or @evb is invalid.
+ * Returns NULL if @evq is empty.
+ */
+extern struct pt_event *pt_evq_dequeue(struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Clear a queue and discard events.
+ *
+ * Removes all events for binding @evb from @evq.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @evq is NULL or @evb is invalid.
+ */
+extern int pt_evq_clear(struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Check for emptiness.
+ *
+ * Check if @evq for binding @evb is empty.
+ *
+ * Returns a positive number if @evq is empty.
+ * Returns zero if @evq is not empty.
+ * Returns -pte_internal if @evq is NULL or @evb is invalid.
+ */
+extern int pt_evq_empty(const struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Check for non-emptiness.
+ *
+ * Check if @evq for binding @evb contains pending events.
+ *
+ * Returns a positive number if @evq is not empty.
+ * Returns zero if @evq is empty.
+ * Returns -pte_internal if @evq is NULL or @evb is invalid.
+ */
+extern int pt_evq_pending(const struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Find an event by type.
+ *
+ * Searches @evq for binding @evb for an event of type @evt.
+ *
+ * Returns a pointer to the first matching event on success.
+ * Returns NULL if there is no such event.
+ * Returns NULL if @evq is NULL.
+ * Returns NULL if @evb or @evt is invalid.
+ */
+extern struct pt_event *pt_evq_find(struct pt_event_queue *evq,
+ enum pt_event_binding evb,
+ enum pt_event_type evt);
+
+#endif /* PT_EVENT_QUEUE_H */
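
A sketch (not from the library sources) of the enqueue/dequeue cycle; the event type ptev_enabled and the struct pt_event layout are taken from the public intel-pt.h:

#include "pt_event_queue.h"

static void example_evq(void)
{
        struct pt_event_queue evq;
        struct pt_event *ev;

        pt_evq_init(&evq);

        /* Queue an enable event to be published with the next TIP packet. */
        ev = pt_evq_enqueue(&evq, evb_tip);
        if (ev)
                ev->type = ptev_enabled;

        /* Drain the TIP-bound queue. */
        while ((ev = pt_evq_dequeue(&evq, evb_tip)) != NULL) {
                /* ... publish @ev ... */
        }
}
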
diff --git a/libipt/internal/include/pt_ild.h b/libipt/internal/include/pt_ild.h
new file mode 100644
index 000000000000..d0d0e915fb07
--- /dev/null
+++ b/libipt/internal/include/pt_ild.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PT_ILD_H)
+#define PT_ILD_H
+
+#include "pt_insn.h"
+
+#include "intel-pt.h"
+
+
+typedef enum {
+ PTI_MAP_0, /* 1-byte opcodes. may have modrm */
+ PTI_MAP_1, /* 2-byte opcodes (0x0f). may have modrm */
+ PTI_MAP_2, /* 3-byte opcodes (0x0f38). has modrm */
+ PTI_MAP_3, /* 3-byte opcodes (0x0f3a). has modrm */
+ PTI_MAP_AMD3DNOW, /* 3d-now opcodes (0x0f0f). has modrm */
+ PTI_MAP_INVALID
+} pti_map_enum_t;
+
+struct pt_ild {
+ /* inputs */
+ uint8_t const *itext;
+ uint8_t max_bytes; /* 1..15 bytes */
+ enum pt_exec_mode mode;
+
+ union {
+ struct {
+ uint32_t osz:1;
+ uint32_t asz:1;
+ uint32_t lock:1;
+ uint32_t f3:1;
+ uint32_t f2:1;
+ uint32_t last_f2f3:2; /* 2 or 3 */
+ /* The vex bit is set for c4/c5 VEX and EVEX. */
+ uint32_t vex:1;
+ /* The REX.R and REX.W bits in REX, VEX, or EVEX. */
+ uint32_t rex_r:1;
+ uint32_t rex_w:1;
+ } s;
+ uint32_t i;
+ } u;
+ uint8_t imm1_bytes; /* # of bytes in 1st immediate */
+ uint8_t imm2_bytes; /* # of bytes in 2nd immediate */
+ uint8_t disp_bytes; /* # of displacement bytes */
+ uint8_t modrm_byte;
+ /* 5 bits, but valid values are 0..3; could be moved into the bit union. */
+ uint8_t map;
+ uint8_t rex; /* 0b0100wrxb */
+ uint8_t nominal_opcode;
+ uint8_t disp_pos;
+ /* imm_pos can be derived from disp_pos + disp_bytes. */
+};
+
+static inline pti_map_enum_t pti_get_map(const struct pt_ild *ild)
+{
+ return (pti_map_enum_t) ild->map;
+}
+
+static inline uint8_t pti_get_modrm_mod(const struct pt_ild *ild)
+{
+ return ild->modrm_byte >> 6;
+}
+
+static inline uint8_t pti_get_modrm_reg(const struct pt_ild *ild)
+{
+ return (ild->modrm_byte >> 3) & 7;
+}
+
+static inline uint8_t pti_get_modrm_rm(const struct pt_ild *ild)
+{
+ return ild->modrm_byte & 7;
+}
+
+/* MAIN ENTRY POINTS */
+
+/* One-time initialization. Not thread-safe; call while still single-threaded. */
+extern void pt_ild_init(void);
+
+/* All decoding is thread-safe. */
+
+/* Decode one instruction.
+ *
+ * Input:
+ *
+ * @insn->ip: the virtual address of the instruction
+ * @insn->raw: the memory at that virtual address
+ * @insn->size: the maximal size of the instruction
+ * @insn->mode: the execution mode
+ *
+ * Output:
+ *
+ * @insn->size: the actual size of the instruction
+ * @insn->iclass: a coarse classification
+ *
+ * @iext->iclass: a finer grain classification
+ * @iext->variant: instruction class dependent information
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext);
+
+#endif /* PT_ILD_H */
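
A decode sketch (not from the library sources) for a single 64-bit instruction; the struct pt_insn fields used (ip, mode, size, raw) are the ones named in the comment above, and ptem_64bit comes from intel-pt.h:

#include "pt_ild.h"

#include <string.h>

static int example_ild(const uint8_t *bytes, uint8_t nbytes, uint64_t ip)
{
        struct pt_insn insn;
        struct pt_insn_ext iext;

        if (nbytes > sizeof(insn.raw))
                return -pte_invalid;

        memset(&insn, 0, sizeof(insn));
        memset(&iext, 0, sizeof(iext));

        insn.ip = ip;
        insn.mode = ptem_64bit;
        insn.size = nbytes;
        memcpy(insn.raw, bytes, nbytes);

        /* Fills in insn.size, insn.iclass, iext.iclass, and iext.variant. */
        return pt_ild_decode(&insn, &iext);
}
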
diff --git a/libipt/internal/include/pt_image.h b/libipt/internal/include/pt_image.h
new file mode 100644
index 000000000000..dbc2186bea18
--- /dev/null
+++ b/libipt/internal/include/pt_image.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_IMAGE_H
+#define PT_IMAGE_H
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* A list of sections. */
+struct pt_section_list {
+ /* The next list element. */
+ struct pt_section_list *next;
+
+ /* The mapped section. */
+ struct pt_mapped_section section;
+
+ /* The image section identifier. */
+ int isid;
+};
+
+/* A traced image consisting of a collection of sections. */
+struct pt_image {
+ /* The optional image name. */
+ char *name;
+
+ /* The list of sections. */
+ struct pt_section_list *sections;
+
+ /* An optional read memory callback. */
+ struct {
+ /* The callback function. */
+ read_memory_callback_t *callback;
+
+ /* The callback context. */
+ void *context;
+ } readmem;
+};
+
+/* Initialize an image with an optional @name. */
+extern void pt_image_init(struct pt_image *image, const char *name);
+
+/* Finalize an image.
+ *
+ * This removes all sections and frees the name.
+ */
+extern void pt_image_fini(struct pt_image *image);
+
+/* Add a section to an image.
+ *
+ * Add @section identified by @isid to @image at @vaddr in @asid. If @section
+ * overlaps with existing sections, the existing sections are shrunk, split, or
+ * removed to accommodate @section. Absence of a section identifier is indicated
+ * by an @isid of zero.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @image, @section, or @asid is NULL.
+ */
+extern int pt_image_add(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr, int isid);
+
+/* Remove a section from an image.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @image, @section, or @asid is NULL.
+ * Returns -pte_bad_image if @image does not contain @section at @vaddr.
+ */
+extern int pt_image_remove(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr);
+
+/* Read memory from an image.
+ *
+ * Reads at most @size bytes from @image at @addr in @asid into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_internal if @image, @isid, @buffer, or @asid is NULL.
+ * Returns -pte_nomap if the section does not contain @addr.
+ */
+extern int pt_image_read(struct pt_image *image, int *isid, uint8_t *buffer,
+ uint16_t size, const struct pt_asid *asid,
+ uint64_t addr);
+
+/* Find an image section.
+ *
+ * Find the section containing @vaddr in @asid and provide it in @msec. On
+ * success, takes a reference of @msec->section that the caller needs to put
+ * after use.
+ *
+ * Returns the section's identifier on success, a negative error code otherwise.
+ * Returns -pte_internal if @image, @msec, or @asid is NULL.
+ * Returns -pte_nomap if there is no such section in @image.
+ */
+extern int pt_image_find(struct pt_image *image, struct pt_mapped_section *msec,
+ const struct pt_asid *asid, uint64_t vaddr);
+
+/* Validate an image section.
+ *
+ * Validate that a lookup of @vaddr in @msec->asid in @image would result in
+ * @msec identified by @isid.
+ *
+ * Validation may fail sporadically.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if @image or @msec is NULL.
+ * Returns -pte_nomap if validation failed.
+ */
+extern int pt_image_validate(const struct pt_image *image,
+ const struct pt_mapped_section *msec,
+ uint64_t vaddr, int isid);
+
+#endif /* PT_IMAGE_H */
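
A read sketch (not from the library sources); pt_asid_init() is assumed to be the helper from pt_asid.h that sets up a default address-space:

#include "pt_image.h"
#include "pt_asid.h"

static int example_image_read(struct pt_image *image, uint64_t addr)
{
        struct pt_asid asid;
        uint8_t buffer[16];
        int isid;

        pt_asid_init(&asid);

        /* Returns the number of bytes read or a negative error code. */
        return pt_image_read(image, &isid, buffer, (uint16_t) sizeof(buffer),
                             &asid, addr);
}
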
diff --git a/libipt/internal/include/pt_image_section_cache.h b/libipt/internal/include/pt_image_section_cache.h
new file mode 100644
index 000000000000..1e7f0d358af2
--- /dev/null
+++ b/libipt/internal/include/pt_image_section_cache.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_IMAGE_SECTION_CACHE_H
+#define PT_IMAGE_SECTION_CACHE_H
+
+#include <stdint.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+struct pt_section;
+
+
+/* An image section cache entry. */
+struct pt_iscache_entry {
+ /* The section object.
+ *
+ * We hold a reference to the section - put it when the section is
+ * removed from the cache.
+ */
+ struct pt_section *section;
+
+ /* The base address at which @section has been loaded. */
+ uint64_t laddr;
+};
+
+/* An image section cache least recently used cache entry. */
+struct pt_iscache_lru_entry {
+ /* The next entry in a list ordered by recent use. */
+ struct pt_iscache_lru_entry *next;
+
+ /* The section mapped by the image section cache. */
+ struct pt_section *section;
+
+ /* The amount of memory used by mapping @section in bytes. */
+ uint64_t size;
+};
+
+/* A cache of image sections and their load addresses.
+ *
+ * We combine the section with its load address to reduce the amount of
+ * information we need to store in order to read from a cached section by
+ * virtual address.
+ *
+ * Internally, the section object will be shared if it is loaded at different
+ * addresses in the cache.
+ *
+ * The cache does not consider the address-space the section is mapped into.
+ * This is not relevant for reading from the section.
+ */
+struct pt_image_section_cache {
+ /* The optional name of the cache; NULL if not named. */
+ char *name;
+
+ /* An array of @nentries cached sections. */
+ struct pt_iscache_entry *entries;
+
+ /* A list of mapped sections ordered by time of last access. */
+ struct pt_iscache_lru_entry *lru;
+
+ /* The memory limit for our LRU cache. */
+ uint64_t limit;
+
+ /* The current size of our LRU cache. */
+ uint64_t used;
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting this image section cache. */
+ mtx_t lock;
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The capacity of the @entries array.
+ *
+ * Cached sections are identified by a positive integer, the image
+ * section identifier (isid), which is derived from their index into the
+ * @entries array.
+ *
+ * We can't expand the section cache capacity beyond INT_MAX.
+ */
+ uint16_t capacity;
+
+ /* The current size of the cache in number of entries.
+ *
+ * This is smaller than @capacity if there is still room in the @entries
+ * array; equal to @capacity if the @entries array is full and needs to
+ * be reallocated.
+ */
+ uint16_t size;
+};
+
+
+/* Initialize an image section cache. */
+extern int pt_iscache_init(struct pt_image_section_cache *iscache,
+ const char *name);
+
+/* Finalize an image section cache. */
+extern void pt_iscache_fini(struct pt_image_section_cache *iscache);
+
+/* Add a section to the cache.
+ *
+ * Adds @section at @laddr to @iscache and returns its isid. If a similar
+ * section is already cached, returns that section's isid, instead.
+ *
+ * We take a full section rather than its filename and range in that file to
+ * avoid the dependency on pt_section.h. Callers are expected to query the
+ * cache before creating the section, so we should only see unnecessary section
+ * creation/destruction on insertion races.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @iscache or @section is NULL.
+ * Returns -pte_internal if @section's filename is NULL.
+ */
+extern int pt_iscache_add(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t laddr);
+
+/* Find a section in the cache.
+ *
+ * Returns a positive isid if a section matching @filename, @offset, @size
+ * loaded at @laddr is found in @iscache.
+ * Returns zero if no such section is found.
+ * Returns a negative error code otherwise.
+ * Returns -pte_internal if @iscache or @filename is NULL.
+ */
+extern int pt_iscache_find(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t laddr);
+
+/* Lookup the section identified by its isid.
+ *
+ * Provides a reference to the section in @section and its load address in
+ * @laddr on success. The caller is expected to put the returned section after
+ * use.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @iscache, @section, or @laddr is NULL.
+ * Returns -pte_bad_image if @iscache does not contain @isid.
+ */
+extern int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr,
+ int isid);
+
+/* Clear an image section cache. */
+extern int pt_iscache_clear(struct pt_image_section_cache *iscache);
+
+/* Notify about the mapping of a cached section.
+ *
+ * Notifies @iscache that @section has been mapped.
+ *
+ * The caller guarantees that @iscache contains @section (by using @section's
+ * iscache pointer) and prevents @iscache from detaching.
+ *
+ * The caller must not lock @section to allow @iscache to map it. This function
+ * must not try to detach from @section.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_internal if @iscache or @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section);
+
+/* Notify about a size change of a mapped section.
+ *
+ * Notifies @iscache that @section's size has changed while it was mapped.
+ *
+ * The caller guarantees that @iscache contains @section (by using @section's
+ * iscache pointer) and prevents @iscache from detaching.
+ *
+ * The caller must not lock @section to allow @iscache to map it. This function
+ * must not try to detach from @section.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_internal if @iscache or @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t size);
+
+#endif /* PT_IMAGE_SECTION_CACHE_H */
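
A sketch (not from the library sources) of the query-before-create pattern described above; the section-creation step is only hinted at since it lives in pt_section.h:

#include "pt_image_section_cache.h"

static int example_iscache(struct pt_image_section_cache *iscache,
                           const char *filename, uint64_t offset,
                           uint64_t size, uint64_t laddr)
{
        int isid;

        /* A positive isid means the section is already cached. */
        isid = pt_iscache_find(iscache, filename, offset, size, laddr);
        if (isid != 0)
                return isid;

        /* Not cached: create a struct pt_section for @filename here and
         * hand it to pt_iscache_add(iscache, section, laddr).
         */
        return 0;
}
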
diff --git a/libipt/internal/include/pt_insn.h b/libipt/internal/include/pt_insn.h
new file mode 100644
index 000000000000..22039827daaf
--- /dev/null
+++ b/libipt/internal/include/pt_insn.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_INSN_H
+#define PT_INSN_H
+
+#include <inttypes.h>
+
+#include "intel-pt.h"
+
+struct pt_insn_ext;
+
+
+/* A finer-grain classification of instructions used internally. */
+typedef enum {
+ PTI_INST_INVALID,
+
+ PTI_INST_CALL_9A,
+ PTI_INST_CALL_FFr3,
+ PTI_INST_CALL_FFr2,
+ PTI_INST_CALL_E8,
+ PTI_INST_INT,
+
+ PTI_INST_INT3,
+ PTI_INST_INT1,
+ PTI_INST_INTO,
+ PTI_INST_IRET, /* includes IRETD and IRETQ (EOSZ determines) */
+
+ PTI_INST_JMP_E9,
+ PTI_INST_JMP_EB,
+ PTI_INST_JMP_EA,
+ PTI_INST_JMP_FFr5, /* REXW? */
+ PTI_INST_JMP_FFr4,
+ PTI_INST_JCC,
+ PTI_INST_JrCXZ,
+ PTI_INST_LOOP,
+ PTI_INST_LOOPE, /* aka Z */
+ PTI_INST_LOOPNE, /* aka NE */
+
+ PTI_INST_MOV_CR3,
+
+ PTI_INST_RET_C3,
+ PTI_INST_RET_C2,
+ PTI_INST_RET_CB,
+ PTI_INST_RET_CA,
+
+ PTI_INST_SYSCALL,
+ PTI_INST_SYSENTER,
+ PTI_INST_SYSEXIT,
+ PTI_INST_SYSRET,
+
+ PTI_INST_VMLAUNCH,
+ PTI_INST_VMRESUME,
+ PTI_INST_VMCALL,
+ PTI_INST_VMPTRLD,
+
+ PTI_INST_PTWRITE,
+
+ PTI_INST_LAST
+} pti_inst_enum_t;
+
+/* Information about an instruction we need internally in addition to the
+ * information provided in struct pt_insn.
+ */
+struct pt_insn_ext {
+ /* A more detailed instruction class. */
+ pti_inst_enum_t iclass;
+
+ /* Instruction-specific information. */
+ union {
+ /* For branch instructions. */
+ struct {
+ /* The branch displacement.
+ *
+ * This is only valid for direct calls/jumps.
+ *
+ * The displacement is applied to the address of the
+ * instruction following the branch.
+ */
+ int32_t displacement;
+
+ /* A flag saying whether the branch is direct.
+ *
+ * non-zero: direct
+ * zero: indirect
+ *
+ * This is expected to go away someday when we extend
+ * enum pt_insn_class to distinguish direct and indirect
+ * branches.
+ */
+ uint8_t is_direct;
+ } branch;
+ } variant;
+};
+
+
+/* Check if the instruction @insn/@iext changes the current privilege level.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_changes_cpl(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext changes CR3.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_changes_cr3(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a (near or far) branch.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a far branch.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_far_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext binds to a PIP packet.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_binds_to_pip(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext binds to a VMCS packet.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_binds_to_vmcs(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a ptwrite instruction.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_ptwrite(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Determine the IP of the next instruction.
+ *
+ * Tries to determine the IP of the next instruction without using trace and
+ * provides it in @ip unless @ip is NULL.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_query if the IP can't be determined.
+ * Returns -pte_internal if @insn or @iext is NULL.
+ */
+extern int pt_insn_next_ip(uint64_t *ip, const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Decode and analyze one instruction.
+ *
+ * Decodes the instruction at @insn->ip in @insn->mode into @insn and @iext.
+ *
+ * If the instruction cannot be decoded using a single memory read in a single
+ * section, sets @insn->truncated and reads the missing bytes from one or more
+ * other sections until either the instruction can be decoded or we're sure it
+ * is invalid.
+ *
+ * Returns the size in bytes on success, a negative error code otherwise.
+ * Returns -pte_bad_insn if the instruction could not be decoded.
+ */
+extern int pt_insn_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+ struct pt_image *image, const struct pt_asid *asid);
+
+/* Determine if a range of instructions is contiguous.
+ *
+ * Try to proceed from IP @begin to IP @end in @asid without using trace.
+ *
+ * Returns a positive integer if we reach @end from @begin.
+ * Returns zero if we couldn't reach @end within @nsteps steps.
+ * Returns a negative error code otherwise.
+ */
+extern int pt_insn_range_is_contiguous(uint64_t begin, uint64_t end,
+ enum pt_exec_mode mode,
+ struct pt_image *image,
+ const struct pt_asid *asid,
+ size_t nsteps);
+
+#endif /* PT_INSN_H */
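
A sketch (not from the library sources) that decodes the instruction at @ip from an image and classifies it; pt_asid_init() is assumed to come from pt_asid.h and ptem_64bit from intel-pt.h:

#include "pt_insn.h"
#include "pt_asid.h"

#include <string.h>

static int example_insn(struct pt_image *image, uint64_t ip)
{
        struct pt_insn insn;
        struct pt_insn_ext iext;
        struct pt_asid asid;
        int size;

        memset(&insn, 0, sizeof(insn));
        pt_asid_init(&asid);

        insn.ip = ip;
        insn.mode = ptem_64bit;

        size = pt_insn_decode(&insn, &iext, image, &asid);
        if (size < 0)
                return size;

        /* Non-zero if the decoded instruction is a near or far branch. */
        return pt_insn_is_branch(&insn, &iext);
}
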
diff --git a/libipt/internal/include/pt_insn_decoder.h b/libipt/internal/include/pt_insn_decoder.h
new file mode 100644
index 000000000000..70b47f33e491
--- /dev/null
+++ b/libipt/internal/include/pt_insn_decoder.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_INSN_DECODER_H
+#define PT_INSN_DECODER_H
+
+#include "pt_query_decoder.h"
+#include "pt_image.h"
+#include "pt_retstack.h"
+#include "pt_ild.h"
+#include "pt_msec_cache.h"
+
+#include <inttypes.h>
+
+
+struct pt_insn_decoder {
+ /* The Intel(R) Processor Trace query decoder. */
+ struct pt_query_decoder query;
+
+ /* The configuration flags.
+ *
+ * Those are our flags set by the user. In @query.config.flags, we set
+ * the flags we need for the query decoder.
+ */
+ struct pt_conf_flags flags;
+
+ /* The default image. */
+ struct pt_image default_image;
+
+ /* The image. */
+ struct pt_image *image;
+
+ /* The current cached section. */
+ struct pt_msec_cache scache;
+
+ /* The current address space. */
+ struct pt_asid asid;
+
+ /* The current Intel(R) Processor Trace event. */
+ struct pt_event event;
+
+ /* The call/return stack for ret compression. */
+ struct pt_retstack retstack;
+
+ /* The current instruction.
+ *
+ * This is only valid if @process_insn is set.
+ */
+ struct pt_insn insn;
+ struct pt_insn_ext iext;
+
+ /* The current IP.
+ *
+ * If tracing is disabled, this is the IP at which we assume tracing to
+ * be resumed.
+ */
+ uint64_t ip;
+
+ /* The current execution mode. */
+ enum pt_exec_mode mode;
+
+ /* The status of the last successful decoder query.
+ *
+ * Errors are reported directly; the status is always a non-negative
+ * pt_status_flag bit-vector.
+ */
+ int status;
+
+ /* A collection of flags defining how to proceed with flow reconstruction:
+ *
+ * - tracing is enabled.
+ */
+ uint32_t enabled:1;
+
+ /* - process @event. */
+ uint32_t process_event:1;
+
+ /* - instructions are executed speculatively. */
+ uint32_t speculative:1;
+
+ /* - process @insn/@iext.
+ *
+ * We have started processing events binding to @insn/@iext. We have
+ * not yet proceeded past it.
+ *
+ * We will do so in pt_insn_event() after processing all events that
+ * bind to it.
+ */
+ uint32_t process_insn:1;
+
+ /* - a paging event has already been bound to @insn/@iext. */
+ uint32_t bound_paging:1;
+
+ /* - a vmcs event has already been bound to @insn/@iext. */
+ uint32_t bound_vmcs:1;
+
+ /* - a ptwrite event has already been bound to @insn/@iext. */
+ uint32_t bound_ptwrite:1;
+};
+
+
+/* Initialize an instruction flow decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @decoder is NULL.
+ * Returns -pte_invalid if @config is NULL.
+ */
+extern int pt_insn_decoder_init(struct pt_insn_decoder *decoder,
+ const struct pt_config *config);
+
+/* Finalize an instruction flow decoder. */
+extern void pt_insn_decoder_fini(struct pt_insn_decoder *decoder);
+
+#endif /* PT_INSN_DECODER_H */
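
An init/fini sketch (not from the library sources); applications would normally go through the public allocation API in intel-pt.h rather than using the internal decoder object directly:

#include "pt_insn_decoder.h"

static int example_insn_decoder(const struct pt_config *config)
{
        struct pt_insn_decoder decoder;
        int errcode;

        errcode = pt_insn_decoder_init(&decoder, config);
        if (errcode < 0)
                return errcode;

        /* ... synchronize and step via the public pt_insn_* functions ... */

        pt_insn_decoder_fini(&decoder);
        return 0;
}
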
diff --git a/libipt/internal/include/pt_last_ip.h b/libipt/internal/include/pt_last_ip.h
new file mode 100644
index 000000000000..0f4490db2b60
--- /dev/null
+++ b/libipt/internal/include/pt_last_ip.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_LAST_IP_H
+#define PT_LAST_IP_H
+
+#include <stdint.h>
+
+struct pt_packet_ip;
+struct pt_config;
+
+
+/* Keeping track of the last-ip in Intel PT packets. */
+struct pt_last_ip {
+ /* The last IP. */
+ uint64_t ip;
+
+ /* Flags governing the handling of IP updates and queries:
+ *
+ * - we have seen an IP update.
+ */
+ uint32_t have_ip:1;
+ /* - the IP has been suppressed in the last update. */
+ uint32_t suppressed:1;
+};
+
+
+/* Initialize (or reset) the last-ip. */
+extern void pt_last_ip_init(struct pt_last_ip *last_ip);
+
+/* Query the last-ip.
+ *
+ * If @ip is not NULL, provides the last-ip in @ip on success.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @last_ip is NULL.
+ * Returns -pte_noip if there is no last-ip.
+ * Returns -pte_ip_suppressed if the last-ip has been suppressed.
+ */
+extern int pt_last_ip_query(uint64_t *ip, const struct pt_last_ip *last_ip);
+
+/* Update last-ip.
+ *
+ * Updates @last_ip based on @packet and, if non-null, @config.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @last_ip or @packet is NULL.
+ * Returns -pte_bad_packet if @packet appears to be corrupted.
+ */
+extern int pt_last_ip_update_ip(struct pt_last_ip *last_ip,
+ const struct pt_packet_ip *packet,
+ const struct pt_config *config);
+
+#endif /* PT_LAST_IP_H */
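
An update/query sketch (not from the library sources); the struct pt_packet_ip fields (ipc, ip) and pt_ipc_sext_48 are taken from the public intel-pt.h:

#include "pt_last_ip.h"

#include "intel-pt.h"

static int example_last_ip(uint64_t *ip)
{
        struct pt_last_ip last_ip;
        struct pt_packet_ip packet;
        int errcode;

        pt_last_ip_init(&last_ip);

        /* A fully specified (sign-extended 48-bit) IP payload. */
        packet.ipc = pt_ipc_sext_48;
        packet.ip = 0x1000ull;

        errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
        if (errcode < 0)
                return errcode;

        return pt_last_ip_query(ip, &last_ip);
}
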
diff --git a/libipt/internal/include/pt_mapped_section.h b/libipt/internal/include/pt_mapped_section.h
new file mode 100644
index 000000000000..7e1016111f88
--- /dev/null
+++ b/libipt/internal/include/pt_mapped_section.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_MAPPED_SECTION_H
+#define PT_MAPPED_SECTION_H
+
+#include "intel-pt.h"
+#include "pt_section.h"
+
+#include <stdint.h>
+
+
+/* A section mapped into memory. */
+struct pt_mapped_section {
+ /* The section that is mapped. */
+ struct pt_section *section;
+
+ /* The address space into which the section is mapped. */
+ struct pt_asid asid;
+
+ /* The virtual address at which the section is mapped. */
+ uint64_t vaddr;
+
+ /* The offset into the section.
+ *
+ * This is normally zero but when @section is split, @offset is added to
+ * the section/file offset when accessing @section.
+ */
+ uint64_t offset;
+
+ /* The size of the section.
+ *
+ * This is normally @section->size but when @section is split, this is
+ * used to determine the size of the sub-section.
+ */
+ uint64_t size;
+};
+
+
+static inline void pt_msec_init(struct pt_mapped_section *msec,
+ struct pt_section *section,
+ const struct pt_asid *asid,
+ uint64_t vaddr, uint64_t offset, uint64_t size)
+{
+ if (!msec)
+ return;
+
+ msec->section = section;
+ msec->vaddr = vaddr;
+ msec->offset = offset;
+ msec->size = size;
+
+ if (asid)
+ msec->asid = *asid;
+ else
+ pt_asid_init(&msec->asid);
+}
+
+/* Destroy a mapped section - does not free @msec->section. */
+static inline void pt_msec_fini(struct pt_mapped_section *msec)
+{
+ (void) msec;
+
+ /* Nothing to do. */
+}
+
+/* Return the virtual address of the beginning of the memory region. */
+static inline uint64_t pt_msec_begin(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->vaddr;
+}
+
+/* Return the virtual address one byte past the end of the memory region. */
+static inline uint64_t pt_msec_end(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->vaddr + msec->size;
+}
+
+/* Return the section/file offset. */
+static inline uint64_t pt_msec_offset(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->offset;
+}
+
+/* Return the section size. */
+static inline uint64_t pt_msec_size(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->size;
+}
+
+/* Return the underlying section. */
+static inline struct pt_section *
+pt_msec_section(const struct pt_mapped_section *msec)
+{
+ return msec->section;
+}
+
+/* Return an identifier for the address-space the section is mapped into. */
+static inline const struct pt_asid *
+pt_msec_asid(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return NULL;
+
+ return &msec->asid;
+}
+
+/* Translate a section/file offset into a virtual address. */
+static inline uint64_t pt_msec_map(const struct pt_mapped_section *msec,
+ uint64_t offset)
+{
+ return (offset - msec->offset) + msec->vaddr;
+}
+
+/* Translate a virtual address into a section/file offset. */
+static inline uint64_t pt_msec_unmap(const struct pt_mapped_section *msec,
+ uint64_t vaddr)
+{
+ return (vaddr - msec->vaddr) + msec->offset;
+}
+
+/* Read memory from a mapped section.
+ *
+ * The caller must check @msec->asid.
+ * The caller must ensure that @msec->section is mapped.
+ *
+ * Returns the number of bytes read on success.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_msec_read(const struct pt_mapped_section *msec,
+ uint8_t *buffer, uint16_t size,
+ uint64_t vaddr)
+{
+ struct pt_section *section;
+ uint64_t begin, end, mbegin, mend, offset;
+
+ if (!msec)
+ return -pte_internal;
+
+ begin = vaddr;
+ end = begin + size;
+ if (end < begin)
+ end = UINT64_MAX;
+
+ mbegin = pt_msec_begin(msec);
+ mend = pt_msec_end(msec);
+
+ if (begin < mbegin || mend <= begin)
+ return -pte_nomap;
+
+ if (mend < end)
+ end = mend;
+
+ size = (uint16_t) (end - begin);
+
+ section = pt_msec_section(msec);
+ offset = pt_msec_unmap(msec, begin);
+
+ return pt_section_read(section, buffer, size, offset);
+}
+
+#endif /* PT_MAPPED_SECTION_H */
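
A worked sketch (not from the library sources) of the map/unmap translation above: a 0x2000-byte section taken from file offset 0x1000 and mapped at 0x400000, so file offset 0x1500 corresponds to virtual address 0x400500. pt_asid_init() is assumed to come from pt_asid.h:

#include "pt_mapped_section.h"
#include "pt_asid.h"

static void example_msec(struct pt_section *section)
{
        struct pt_mapped_section msec;
        struct pt_asid asid;
        uint64_t vaddr, offset;

        pt_asid_init(&asid);
        pt_msec_init(&msec, section, &asid, 0x400000ull, 0x1000ull,
                     0x2000ull);

        vaddr = pt_msec_map(&msec, 0x1500ull);   /* 0x400500 */
        offset = pt_msec_unmap(&msec, vaddr);    /* 0x1500 */

        (void) offset;
        pt_msec_fini(&msec);
}
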
diff --git a/libipt/internal/include/pt_msec_cache.h b/libipt/internal/include/pt_msec_cache.h
new file mode 100644
index 000000000000..43f3813bfbd7
--- /dev/null
+++ b/libipt/internal/include/pt_msec_cache.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_MSEC_CACHE_H
+#define PT_MSEC_CACHE_H
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+
+/* A single-entry mapped section cache.
+ *
+ * The cached section is implicitly mapped and unmapped. The cache is not
+ * thread-safe.
+ */
+struct pt_msec_cache {
+ /* The cached section.
+ *
+ * The cache is valid if and only if @msec.section is not NULL.
+ *
+ * It needs to be unmapped and put. Use pt_blk_scache_invalidate() to
+ * release the cached section and to invalidate the cache.
+ */
+ struct pt_mapped_section msec;
+
+ /* The section identifier. */
+ int isid;
+};
+
+/* Initialize the cache. */
+extern int pt_msec_cache_init(struct pt_msec_cache *cache);
+
+/* Finalize the cache. */
+extern void pt_msec_cache_fini(struct pt_msec_cache *cache);
+
+/* Invalidate the cache. */
+extern int pt_msec_cache_invalidate(struct pt_msec_cache *cache);
+
+/* Read the cached section.
+ *
+ * If @cache is not empty and @image would find it when looking up @vaddr in
+ * @*pmsec->asid, provide a pointer to the cached section in @pmsec and return
+ * its image section identifier.
+ *
+ * The provided pointer remains valid until @cache is invalidated.
+ *
+ * Returns @*pmsec's isid on success, a negative pt_error_code otherwise.
+ */
+extern int pt_msec_cache_read(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image, uint64_t vaddr);
+
+/* Fill the cache.
+ *
+ * Look up @vaddr in @asid in @image, cache the found section, provide it in
+ * @pmsec, and return its image section identifier.
+ *
+ * Invalidates @cache.
+ *
+ * The provided pointer remains valid until @cache is invalidated.
+ *
+ * Returns @*pmsec's isid on success, a negative pt_error_code otherwise.
+ */
+extern int pt_msec_cache_fill(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image,
+ const struct pt_asid *asid, uint64_t vaddr);
+
+#endif /* PT_MSEC_CACHE_H */
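The two lookup functions above are meant to be combined: try the cached entry first and fall back to a full image lookup that refills the cache. A minimal sketch of that pattern, assuming the semantics documented above; the helper name and the way the caller obtains @image, @asid, and @vaddr are illustrative:

#include "pt_msec_cache.h"

static int example_lookup_section(struct pt_msec_cache *cache,
				  const struct pt_mapped_section **pmsec,
				  struct pt_image *image,
				  const struct pt_asid *asid, uint64_t vaddr)
{
	int isid;

	/* Fast path: the single cached entry still covers @vaddr. */
	isid = pt_msec_cache_read(cache, pmsec, image, vaddr);
	if (isid >= 0)
		return isid;

	/* Slow path: look @vaddr up in @image and cache the result. */
	return pt_msec_cache_fill(cache, pmsec, image, asid, vaddr);
}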
diff --git a/libipt/internal/include/pt_opcodes.h b/libipt/internal/include/pt_opcodes.h
new file mode 100644
index 000000000000..93eab79a80ec
--- /dev/null
+++ b/libipt/internal/include/pt_opcodes.h
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_OPCODES_H
+#define PT_OPCODES_H
+
+
+/* A one byte opcode. */
+enum pt_opcode {
+ pt_opc_pad = 0x00,
+ pt_opc_ext = 0x02,
+ pt_opc_psb = pt_opc_ext,
+ pt_opc_tip = 0x0d,
+ pt_opc_tnt_8 = 0x00,
+ pt_opc_tip_pge = 0x11,
+ pt_opc_tip_pgd = 0x01,
+ pt_opc_fup = 0x1d,
+ pt_opc_mode = 0x99,
+ pt_opc_tsc = 0x19,
+ pt_opc_mtc = 0x59,
+ pt_opc_cyc = 0x03,
+
+ /* A free opcode to trigger a decode fault. */
+ pt_opc_bad = 0xd9
+};
+
+/* A one byte extension code for ext opcodes. */
+enum pt_ext_code {
+ pt_ext_psb = 0x82,
+ pt_ext_tnt_64 = 0xa3,
+ pt_ext_pip = 0x43,
+ pt_ext_ovf = 0xf3,
+ pt_ext_psbend = 0x23,
+ pt_ext_cbr = 0x03,
+ pt_ext_tma = 0x73,
+ pt_ext_stop = 0x83,
+ pt_ext_vmcs = 0xc8,
+ pt_ext_ext2 = 0xc3,
+ pt_ext_exstop = 0x62,
+ pt_ext_exstop_ip = 0xe2,
+ pt_ext_mwait = 0xc2,
+ pt_ext_pwre = 0x22,
+ pt_ext_pwrx = 0xa2,
+ pt_ext_ptw = 0x12,
+
+ pt_ext_bad = 0x04
+};
+
+/* A one byte extension 2 code for ext2 extension opcodes. */
+enum pt_ext2_code {
+ pt_ext2_mnt = 0x88,
+
+ pt_ext2_bad = 0x00
+};
+
+/* A one byte opcode mask. */
+enum pt_opcode_mask {
+ pt_opm_tip = 0x1f,
+ pt_opm_tnt_8 = 0x01,
+ pt_opm_tnt_8_shr = 1,
+ pt_opm_fup = pt_opm_tip,
+
+ /* The bit mask for the compression bits in the opcode. */
+ pt_opm_ipc = 0xe0,
+
+ /* The shift right value for ipc bits. */
+ pt_opm_ipc_shr = 5,
+
+ /* The bit mask for the compression bits after shifting. */
+ pt_opm_ipc_shr_mask = 0x7,
+
+ /* Shift counts and masks for decoding the cyc packet. */
+ pt_opm_cyc = 0x03,
+ pt_opm_cyc_ext = 0x04,
+ pt_opm_cyc_bits = 0xf8,
+ pt_opm_cyc_shr = 3,
+ pt_opm_cycx_ext = 0x01,
+ pt_opm_cycx_shr = 1,
+
+ /* The bit mask for the IP bit in the exstop packet. */
+ pt_opm_exstop_ip = 0x80,
+
+ /* The PTW opcode. */
+ pt_opm_ptw = 0x1f,
+
+ /* The bit mask for the IP bit in the ptw packet. */
+ pt_opm_ptw_ip = 0x80,
+
+ /* The bit mask and shr value for the payload bytes field in ptw. */
+ pt_opm_ptw_pb = 0x60,
+ pt_opm_ptw_pb_shr = 5,
+
+ /* The bit mask for the payload bytes field in ptw after shifting. */
+ pt_opm_ptw_pb_shr_mask = 0x3
+};
+
+/* The size of the various opcodes in bytes. */
+enum pt_opcode_size {
+ pt_opcs_pad = 1,
+ pt_opcs_tip = 1,
+ pt_opcs_tip_pge = 1,
+ pt_opcs_tip_pgd = 1,
+ pt_opcs_fup = 1,
+ pt_opcs_tnt_8 = 1,
+ pt_opcs_mode = 1,
+ pt_opcs_tsc = 1,
+ pt_opcs_mtc = 1,
+ pt_opcs_cyc = 1,
+ pt_opcs_psb = 2,
+ pt_opcs_psbend = 2,
+ pt_opcs_ovf = 2,
+ pt_opcs_pip = 2,
+ pt_opcs_tnt_64 = 2,
+ pt_opcs_cbr = 2,
+ pt_opcs_tma = 2,
+ pt_opcs_stop = 2,
+ pt_opcs_vmcs = 2,
+ pt_opcs_mnt = 3,
+ pt_opcs_exstop = 2,
+ pt_opcs_mwait = 2,
+ pt_opcs_pwre = 2,
+ pt_opcs_pwrx = 2,
+ pt_opcs_ptw = 2
+};
+
+/* The psb magic payload.
+ *
+ * The payload is a repeating 2-byte pattern.
+ */
+enum pt_psb_pattern {
+ /* The high and low bytes in the pattern. */
+ pt_psb_hi = pt_opc_psb,
+ pt_psb_lo = pt_ext_psb,
+
+ /* Various combinations of the above parts. */
+ pt_psb_lohi = pt_psb_lo | pt_psb_hi << 8,
+ pt_psb_hilo = pt_psb_hi | pt_psb_lo << 8,
+
+ /* The repeat count of the payload, not including opc and ext. */
+ pt_psb_repeat_count = 7,
+
+ /* The size of the repeated pattern in bytes. */
+ pt_psb_repeat_size = 2
+};
+
+/* The payload details. */
+enum pt_payload {
+ /* The shift counts for post-processing the PIP payload. */
+ pt_pl_pip_shr = 1,
+ pt_pl_pip_shl = 5,
+
+ /* The size of a PIP payload in bytes. */
+ pt_pl_pip_size = 6,
+
+ /* The non-root bit in the first byte of the PIP payload. */
+ pt_pl_pip_nr = 0x01,
+
+	/* The size of an 8bit TNT packet's payload in bits. */
+ pt_pl_tnt_8_bits = 8 - pt_opm_tnt_8_shr,
+
+ /* The size of a 64bit TNT packet's payload in bytes. */
+ pt_pl_tnt_64_size = 6,
+
+ /* The size of a 64bit TNT packet's payload in bits. */
+ pt_pl_tnt_64_bits = 48,
+
+ /* The size of a TSC packet's payload in bytes and in bits. */
+ pt_pl_tsc_size = 7,
+ pt_pl_tsc_bit_size = pt_pl_tsc_size * 8,
+
+ /* The size of a CBR packet's payload in bytes. */
+ pt_pl_cbr_size = 2,
+
+ /* The size of a PSB packet's payload in bytes. */
+ pt_pl_psb_size = pt_psb_repeat_count * pt_psb_repeat_size,
+
+ /* The size of a MODE packet's payload in bytes. */
+ pt_pl_mode_size = 1,
+
+ /* The size of an IP packet's payload with update-16 compression. */
+ pt_pl_ip_upd16_size = 2,
+
+ /* The size of an IP packet's payload with update-32 compression. */
+ pt_pl_ip_upd32_size = 4,
+
+ /* The size of an IP packet's payload with update-48 compression. */
+ pt_pl_ip_upd48_size = 6,
+
+ /* The size of an IP packet's payload with sext-48 compression. */
+ pt_pl_ip_sext48_size = 6,
+
+ /* The size of an IP packet's payload with full-ip compression. */
+ pt_pl_ip_full_size = 8,
+
+ /* Byte locations, sizes, and masks for processing TMA packets. */
+ pt_pl_tma_size = 5,
+ pt_pl_tma_ctc_size = 2,
+ pt_pl_tma_ctc_bit_size = pt_pl_tma_ctc_size * 8,
+ pt_pl_tma_ctc_0 = 2,
+ pt_pl_tma_ctc_1 = 3,
+ pt_pl_tma_ctc_mask = (1 << pt_pl_tma_ctc_bit_size) - 1,
+ pt_pl_tma_fc_size = 2,
+ pt_pl_tma_fc_bit_size = 9,
+ pt_pl_tma_fc_0 = 5,
+ pt_pl_tma_fc_1 = 6,
+ pt_pl_tma_fc_mask = (1 << pt_pl_tma_fc_bit_size) - 1,
+
+ /* The size of a MTC packet's payload in bytes and in bits. */
+ pt_pl_mtc_size = 1,
+ pt_pl_mtc_bit_size = pt_pl_mtc_size * 8,
+
+ /* A mask for the MTC payload bits. */
+ pt_pl_mtc_mask = (1 << pt_pl_mtc_bit_size) - 1,
+
+ /* The maximal payload size in bytes of a CYC packet. */
+ pt_pl_cyc_max_size = 15,
+
+ /* The size of a VMCS packet's payload in bytes. */
+ pt_pl_vmcs_size = 5,
+
+ /* The shift counts for post-processing the VMCS payload. */
+ pt_pl_vmcs_shl = 12,
+
+ /* The size of a MNT packet's payload in bytes. */
+ pt_pl_mnt_size = 8,
+
+ /* The bit-mask for the IP bit in the EXSTOP opcode extension. */
+ pt_pl_exstop_ip_mask = 0x80,
+
+ /* The size of the hints field in the MWAIT payload in bytes. */
+ pt_pl_mwait_hints_size = 4,
+
+ /* The size of the extensions field in the MWAIT payload in bytes. */
+ pt_pl_mwait_ext_size = 4,
+
+ /* The size of the MWAIT payload in bytes. */
+ pt_pl_mwait_size = pt_pl_mwait_hints_size + pt_pl_mwait_ext_size,
+
+ /* The size of the PWRE payload in bytes. */
+ pt_pl_pwre_size = 2,
+
+ /* The bit-mask for the h/w bit in the PWRE payload. */
+ pt_pl_pwre_hw_mask = 0x8,
+
+ /* The bit-mask for the resolved thread sub C-state in the PWRE
+ * payload.
+ */
+ pt_pl_pwre_sub_state_mask = 0xf00,
+
+ /* The shift right value for the resolved thread sub C-state in the
+ * PWRE payload.
+ */
+ pt_pl_pwre_sub_state_shr = 8,
+
+ /* The bit-mask for the resolved thread C-state in the PWRE payload. */
+ pt_pl_pwre_state_mask = 0xf000,
+
+ /* The shift right value for the resolved thread C-state in the
+ * PWRE payload.
+ */
+ pt_pl_pwre_state_shr = 12,
+
+ /* The size of the PWRX payload in bytes. */
+ pt_pl_pwrx_size = 5,
+
+ /* The bit-mask for the deepest core C-state in the PWRX payload. */
+ pt_pl_pwrx_deepest_mask = 0xf,
+
+ /* The shift right value for the deepest core C-state in the PWRX
+ * payload.
+ */
+ pt_pl_pwrx_deepest_shr = 0,
+
+ /* The bit-mask for the last core C-state in the PWRX payload. */
+ pt_pl_pwrx_last_mask = 0xf0,
+
+ /* The shift right value for the last core C-state in the PWRX
+ * payload.
+ */
+ pt_pl_pwrx_last_shr = 4,
+
+ /* The bit-mask for the wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_mask = 0xf00,
+
+ /* The shift right value for the wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_shr = 8,
+
+ /* The bit-mask for the interrupt wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_int = 0x100,
+
+ /* The bit-mask for the store wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_store = 0x400,
+
+ /* The bit-mask for the autonomous wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_hw = 0x800
+};
+
+/* Mode packet masks. */
+enum pt_mode_mask {
+ pt_mom_leaf = 0xe0,
+ pt_mom_leaf_shr = 5,
+ pt_mom_bits = 0x1f
+};
+
+/* Mode packet bits. */
+enum pt_mode_bit {
+ /* mode.exec */
+ pt_mob_exec_csl = 0x01,
+ pt_mob_exec_csd = 0x02,
+
+ /* mode.tsx */
+ pt_mob_tsx_intx = 0x01,
+ pt_mob_tsx_abrt = 0x02
+};
+
+/* The size of the various packets in bytes. */
+enum pt_packet_size {
+ ptps_pad = pt_opcs_pad,
+ ptps_tnt_8 = pt_opcs_tnt_8,
+ ptps_mode = pt_opcs_mode + pt_pl_mode_size,
+ ptps_tsc = pt_opcs_tsc + pt_pl_tsc_size,
+ ptps_mtc = pt_opcs_mtc + pt_pl_mtc_size,
+ ptps_psb = pt_opcs_psb + pt_pl_psb_size,
+ ptps_psbend = pt_opcs_psbend,
+ ptps_ovf = pt_opcs_ovf,
+ ptps_pip = pt_opcs_pip + pt_pl_pip_size,
+ ptps_tnt_64 = pt_opcs_tnt_64 + pt_pl_tnt_64_size,
+ ptps_cbr = pt_opcs_cbr + pt_pl_cbr_size,
+ ptps_tip_supp = pt_opcs_tip,
+ ptps_tip_upd16 = pt_opcs_tip + pt_pl_ip_upd16_size,
+ ptps_tip_upd32 = pt_opcs_tip + pt_pl_ip_upd32_size,
+ ptps_tip_upd48 = pt_opcs_tip + pt_pl_ip_upd48_size,
+ ptps_tip_sext48 = pt_opcs_tip + pt_pl_ip_sext48_size,
+ ptps_tip_full = pt_opcs_tip + pt_pl_ip_full_size,
+ ptps_tip_pge_supp = pt_opcs_tip_pge,
+ ptps_tip_pge_upd16 = pt_opcs_tip_pge + pt_pl_ip_upd16_size,
+ ptps_tip_pge_upd32 = pt_opcs_tip_pge + pt_pl_ip_upd32_size,
+ ptps_tip_pge_upd48 = pt_opcs_tip_pge + pt_pl_ip_upd48_size,
+ ptps_tip_pge_sext48 = pt_opcs_tip_pge + pt_pl_ip_sext48_size,
+ ptps_tip_pge_full = pt_opcs_tip_pge + pt_pl_ip_full_size,
+ ptps_tip_pgd_supp = pt_opcs_tip_pgd,
+ ptps_tip_pgd_upd16 = pt_opcs_tip_pgd + pt_pl_ip_upd16_size,
+ ptps_tip_pgd_upd32 = pt_opcs_tip_pgd + pt_pl_ip_upd32_size,
+ ptps_tip_pgd_upd48 = pt_opcs_tip_pgd + pt_pl_ip_upd48_size,
+ ptps_tip_pgd_sext48 = pt_opcs_tip_pgd + pt_pl_ip_sext48_size,
+ ptps_tip_pgd_full = pt_opcs_tip_pgd + pt_pl_ip_full_size,
+ ptps_fup_supp = pt_opcs_fup,
+ ptps_fup_upd16 = pt_opcs_fup + pt_pl_ip_upd16_size,
+ ptps_fup_upd32 = pt_opcs_fup + pt_pl_ip_upd32_size,
+ ptps_fup_upd48 = pt_opcs_fup + pt_pl_ip_upd48_size,
+ ptps_fup_sext48 = pt_opcs_fup + pt_pl_ip_sext48_size,
+ ptps_fup_full = pt_opcs_fup + pt_pl_ip_full_size,
+ ptps_tma = pt_opcs_tma + pt_pl_tma_size,
+ ptps_stop = pt_opcs_stop,
+ ptps_vmcs = pt_opcs_vmcs + pt_pl_vmcs_size,
+ ptps_mnt = pt_opcs_mnt + pt_pl_mnt_size,
+ ptps_exstop = pt_opcs_exstop,
+ ptps_mwait = pt_opcs_mwait + pt_pl_mwait_size,
+ ptps_pwre = pt_opcs_pwre + pt_pl_pwre_size,
+ ptps_pwrx = pt_opcs_pwrx + pt_pl_pwrx_size,
+ ptps_ptw_32 = pt_opcs_ptw + 4,
+ ptps_ptw_64 = pt_opcs_ptw + 8
+};
+
+/* Supported address range configurations. */
+enum pt_addr_cfg {
+ pt_addr_cfg_disabled = 0,
+ pt_addr_cfg_filter = 1,
+ pt_addr_cfg_stop = 2
+};
+
+#endif /* PT_OPCODES_H */
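As a small worked example of how these constants combine, the following sketch classifies a byte as a TIP-class opcode and extracts its IP compression (IPC) bits; the function names are illustrative only:

#include <stdint.h>

#include "pt_opcodes.h"

/* A byte is a TIP packet if its low five bits match the TIP opcode; the
 * top three bits carry the IP compression.
 */
static inline int example_is_tip(uint8_t opc)
{
	return (opc & pt_opm_tip) == pt_opc_tip;
}

static inline uint8_t example_ipc(uint8_t opc)
{
	return (uint8_t) ((opc & pt_opm_ipc) >> pt_opm_ipc_shr);
}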
diff --git a/libipt/internal/include/pt_packet.h b/libipt/internal/include/pt_packet.h
new file mode 100644
index 000000000000..ed4fc63c4600
--- /dev/null
+++ b/libipt/internal/include/pt_packet.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_PACKET_H
+#define PT_PACKET_H
+
+#include <stdint.h>
+
+struct pt_config;
+struct pt_packet;
+struct pt_packet_ip;
+struct pt_packet_tnt;
+struct pt_packet_pip;
+struct pt_packet_mode;
+struct pt_packet_tsc;
+struct pt_packet_cbr;
+struct pt_packet_tma;
+struct pt_packet_mtc;
+struct pt_packet_cyc;
+struct pt_packet_vmcs;
+struct pt_packet_mnt;
+struct pt_packet_exstop;
+struct pt_packet_mwait;
+struct pt_packet_pwre;
+struct pt_packet_pwrx;
+struct pt_packet_ptw;
+
+
+/* Read the payload of an Intel PT packet.
+ *
+ * Reads the payload of the packet starting at @pos into @packet.
+ *
+ * For pt_pkt_read_psb(), the @packet parameter is omitted; the function
+ * validates that the payload matches the expected PSB pattern.
+ *
+ * Decoding an unknown packet uses @config's decode callback. If the callback
+ * is NULL, pt_pkt_read_unknown() returns -pte_bad_opc.
+ *
+ * Beware that the packet opcode is not checked. The caller is responsible
+ * for checking the opcode and calling the correct packet read function.
+ *
+ * Returns the packet size on success, a negative error code otherwise.
+ * Returns -pte_bad_packet if the packet payload is corrupt.
+ * Returns -pte_eos if the packet does not fit into the trace buffer.
+ * Returns -pte_internal if @packet, @pos, or @config is NULL.
+ */
+extern int pt_pkt_read_unknown(struct pt_packet *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_psb(const uint8_t *pos, const struct pt_config *config);
+extern int pt_pkt_read_ip(struct pt_packet_ip *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tnt_8(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tnt_64(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_pip(struct pt_packet_pip *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mode(struct pt_packet_mode *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_cbr(struct pt_packet_cbr *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tma(struct pt_packet_tma *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mtc(struct pt_packet_mtc *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_cyc(struct pt_packet_cyc *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_vmcs(struct pt_packet_vmcs *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mnt(struct pt_packet_mnt *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_exstop(struct pt_packet_exstop *packet,
+ const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mwait(struct pt_packet_mwait *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_pwre(struct pt_packet_pwre *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_pwrx(struct pt_packet_pwrx *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_ptw(struct pt_packet_ptw *packet, const uint8_t *pos,
+ const struct pt_config *config);
+
+#endif /* PT_PACKET_H */
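A short sketch of the calling convention described above, assuming @pos points at the packet's first (opcode) byte, which is consistent with the packet size being returned; the opcode constants are from pt_opcodes.h and the wrapper name is illustrative:

#include "pt_packet.h"
#include "pt_opcodes.h"

#include "intel-pt.h"

static int example_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos,
			    const struct pt_config *config)
{
	if (!pos)
		return -pte_internal;

	/* The read functions do not check the opcode; the caller does. */
	if (*pos != pt_opc_tsc)
		return -pte_bad_opc;

	/* On success, the packet size (opcode plus payload) is returned. */
	return pt_pkt_read_tsc(packet, pos, config);
}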
diff --git a/libipt/internal/include/pt_packet_decoder.h b/libipt/internal/include/pt_packet_decoder.h
new file mode 100644
index 000000000000..2c114310c84b
--- /dev/null
+++ b/libipt/internal/include/pt_packet_decoder.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_PACKET_DECODER_H
+#define PT_PACKET_DECODER_H
+
+#include "intel-pt.h"
+
+
+/* An Intel PT packet decoder. */
+struct pt_packet_decoder {
+ /* The decoder configuration. */
+ struct pt_config config;
+
+ /* The current position in the trace buffer. */
+ const uint8_t *pos;
+
+ /* The position of the last PSB packet. */
+ const uint8_t *sync;
+};
+
+
+/* Initialize the packet decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_pkt_decoder_init(struct pt_packet_decoder *,
+ const struct pt_config *);
+
+/* Finalize the packet decoder. */
+extern void pt_pkt_decoder_fini(struct pt_packet_decoder *);
+
+
+/* Decoder functions for the packet decoder. */
+extern int pt_pkt_decode_unknown(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_pad(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_psb(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tip(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tnt_8(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tnt_64(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_tip_pge(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_fup(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_pip(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_ovf(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mode(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_psbend(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_tsc(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_cbr(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tma(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mtc(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_cyc(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_stop(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_vmcs(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mnt(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_exstop(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mwait(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_pwre(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_pwrx(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_ptw(struct pt_packet_decoder *, struct pt_packet *);
+
+#endif /* PT_PACKET_DECODER_H */
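A sketch of how the per-opcode decode functions above are meant to be called, assuming the decoder has already been synchronized onto the trace and the byte at its current position has been identified as a PAD opcode; in the library this dispatch happens through the decoder function table:

#include "pt_packet_decoder.h"

static int example_decode_pad(struct pt_packet_decoder *decoder,
			      struct pt_packet *packet)
{
	if (!decoder || !packet)
		return -pte_internal;

	/* Fills in @packet; a non-negative return indicates success. */
	return pt_pkt_decode_pad(decoder, packet);
}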
diff --git a/libipt/internal/include/pt_query_decoder.h b/libipt/internal/include/pt_query_decoder.h
new file mode 100644
index 000000000000..355338feab1c
--- /dev/null
+++ b/libipt/internal/include/pt_query_decoder.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_QUERY_DECODER_H
+#define PT_QUERY_DECODER_H
+
+#include "pt_last_ip.h"
+#include "pt_tnt_cache.h"
+#include "pt_time.h"
+#include "pt_event_queue.h"
+
+#include "intel-pt.h"
+
+struct pt_decoder_function;
+
+
+/* An Intel PT query decoder. */
+struct pt_query_decoder {
+ /* The decoder configuration. */
+ struct pt_config config;
+
+ /* The current position in the trace buffer. */
+ const uint8_t *pos;
+
+ /* The position of the last PSB packet. */
+ const uint8_t *sync;
+
+ /* The decoding function for the next packet. */
+ const struct pt_decoder_function *next;
+
+ /* The last-ip. */
+ struct pt_last_ip ip;
+
+ /* The cached tnt indicators. */
+ struct pt_tnt_cache tnt;
+
+ /* Timing information. */
+ struct pt_time time;
+
+ /* The time at the last query (before reading ahead). */
+ struct pt_time last_time;
+
+ /* Timing calibration. */
+ struct pt_time_cal tcal;
+
+ /* Pending (incomplete) events. */
+ struct pt_event_queue evq;
+
+ /* The current event. */
+ struct pt_event *event;
+
+ /* A collection of flags relevant for decoding:
+ *
+ * - tracing is enabled.
+ */
+ uint32_t enabled:1;
+
+ /* - consume the current packet. */
+ uint32_t consume_packet:1;
+};
+
+/* Initialize the query decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_qry_decoder_init(struct pt_query_decoder *,
+ const struct pt_config *);
+
+/* Finalize the query decoder. */
+extern void pt_qry_decoder_fini(struct pt_query_decoder *);
+
+/* Decoder functions (tracing context). */
+extern int pt_qry_decode_unknown(struct pt_query_decoder *);
+extern int pt_qry_decode_pad(struct pt_query_decoder *);
+extern int pt_qry_decode_psb(struct pt_query_decoder *);
+extern int pt_qry_decode_tip(struct pt_query_decoder *);
+extern int pt_qry_decode_tnt_8(struct pt_query_decoder *);
+extern int pt_qry_decode_tnt_64(struct pt_query_decoder *);
+extern int pt_qry_decode_tip_pge(struct pt_query_decoder *);
+extern int pt_qry_decode_tip_pgd(struct pt_query_decoder *);
+extern int pt_qry_decode_fup(struct pt_query_decoder *);
+extern int pt_qry_decode_pip(struct pt_query_decoder *);
+extern int pt_qry_decode_ovf(struct pt_query_decoder *);
+extern int pt_qry_decode_mode(struct pt_query_decoder *);
+extern int pt_qry_decode_psbend(struct pt_query_decoder *);
+extern int pt_qry_decode_tsc(struct pt_query_decoder *);
+extern int pt_qry_header_tsc(struct pt_query_decoder *);
+extern int pt_qry_decode_cbr(struct pt_query_decoder *);
+extern int pt_qry_header_cbr(struct pt_query_decoder *);
+extern int pt_qry_decode_tma(struct pt_query_decoder *);
+extern int pt_qry_decode_mtc(struct pt_query_decoder *);
+extern int pt_qry_decode_cyc(struct pt_query_decoder *);
+extern int pt_qry_decode_stop(struct pt_query_decoder *);
+extern int pt_qry_decode_vmcs(struct pt_query_decoder *);
+extern int pt_qry_decode_mnt(struct pt_query_decoder *);
+extern int pt_qry_decode_exstop(struct pt_query_decoder *);
+extern int pt_qry_decode_mwait(struct pt_query_decoder *);
+extern int pt_qry_decode_pwre(struct pt_query_decoder *);
+extern int pt_qry_decode_pwrx(struct pt_query_decoder *);
+extern int pt_qry_decode_ptw(struct pt_query_decoder *);
+
+/* Decoder functions (header context). */
+extern int pt_qry_header_fup(struct pt_query_decoder *);
+extern int pt_qry_header_pip(struct pt_query_decoder *);
+extern int pt_qry_header_mode(struct pt_query_decoder *);
+extern int pt_qry_header_vmcs(struct pt_query_decoder *);
+extern int pt_qry_header_mnt(struct pt_query_decoder *);
+
+#endif /* PT_QUERY_DECODER_H */
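The decoder state above is managed through the init and fini functions; a minimal sketch of that lifetime (synchronization and the actual query loop, driven through the public pt_qry_* API, are elided):

#include "pt_query_decoder.h"

static int example_qry_lifetime(const struct pt_config *config)
{
	struct pt_query_decoder decoder;
	int errcode;

	errcode = pt_qry_decoder_init(&decoder, config);
	if (errcode < 0)
		return errcode;

	/* ... synchronize, then query branches, events, and time ... */

	pt_qry_decoder_fini(&decoder);

	return 0;
}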
diff --git a/libipt/internal/include/pt_retstack.h b/libipt/internal/include/pt_retstack.h
new file mode 100644
index 000000000000..c68a782a7dbe
--- /dev/null
+++ b/libipt/internal/include/pt_retstack.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_RETSTACK_H
+#define PT_RETSTACK_H
+
+#include <stdint.h>
+
+
+/* The size of the call/return stack in number of entries. */
+enum {
+ pt_retstack_size = 64
+};
+
+/* A stack of return addresses used for return compression. */
+struct pt_retstack {
+ /* The stack of return addresses.
+ *
+ * We use one additional entry in order to distinguish a full from
+ * an empty stack.
+ */
+ uint64_t stack[pt_retstack_size + 1];
+
+ /* The top of the stack. */
+ uint8_t top;
+
+ /* The bottom of the stack. */
+ uint8_t bottom;
+};
+
+/* Initialize (or reset) a call/return stack. */
+extern void pt_retstack_init(struct pt_retstack *);
+
+/* Test a call/return stack for emptiness.
+ *
+ * Returns zero if @retstack contains at least one element.
+ * Returns a positive integer if @retstack is empty.
+ * Returns -pte_invalid if @retstack is NULL.
+ */
+extern int pt_retstack_is_empty(const struct pt_retstack *retstack);
+
+/* Pop and return the topmost IP.
+ *
+ * If @ip is not NULL, provides the topmost return address on success.
+ * If @retstack is not empty, pops the topmost return address on success.
+ *
+ * Returns zero on success.
+ * Returns -pte_invalid if @retstack is NULL.
+ * Returns -pte_noip if @retstack is empty.
+ */
+extern int pt_retstack_pop(struct pt_retstack *retstack, uint64_t *ip);
+
+/* Push a return address onto the stack.
+ *
+ * Pushes @ip onto @retstack.
+ * If @retstack is full, drops the oldest return address.
+ *
+ * Returns zero on success.
+ */
+extern int pt_retstack_push(struct pt_retstack *retstack, uint64_t ip);
+
+#endif /* PT_RETSTACK_H */
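A sketch of the intended use for return compression: push the address of the instruction following a decoded call, then pop it when a compressed (taken) return is seen. The helper name and the @call_ip and @insn_size parameters are placeholders for what the instruction decoder tracks:

#include "pt_retstack.h"

static int example_compressed_return(struct pt_retstack *retstack,
				     uint64_t call_ip, uint64_t insn_size,
				     uint64_t *return_ip)
{
	int status;

	/* The return address is the instruction after the call. */
	status = pt_retstack_push(retstack, call_ip + insn_size);
	if (status < 0)
		return status;

	/* On a taken compressed return, the target is the pushed address. */
	return pt_retstack_pop(retstack, return_ip);
}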
diff --git a/libipt/internal/include/pt_section.h b/libipt/internal/include/pt_section.h
new file mode 100644
index 000000000000..df9200e19770
--- /dev/null
+++ b/libipt/internal/include/pt_section.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_H
+#define PT_SECTION_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+#include "intel-pt.h"
+
+struct pt_block_cache;
+
+
+/* A section of contiguous memory loaded from a file. */
+struct pt_section {
+ /* The name of the file. */
+ char *filename;
+
+ /* The offset into the file. */
+ uint64_t offset;
+
+ /* The (adjusted) size in bytes. The size is truncated to match the
+ * actual file size.
+ */
+ uint64_t size;
+
+ /* A pointer to OS-specific file status for detecting changes.
+ *
+ * The status is initialized on first pt_section_map() and will be
+ * left in the section until the section is destroyed. This field
+ * is owned by the OS-specific mmap-based section implementation.
+ */
+ void *status;
+
+ /* A pointer to implementation-specific mapping information - NULL if
+ * the section is currently not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ void *mapping;
+
+ /* A pointer to an optional block cache.
+ *
+ * The cache is created on request and destroyed implicitly when the
+ * section is unmapped.
+ *
+ * We read this field without locking and only lock the section in order
+ * to install the block cache.
+ *
+ * We rely on guaranteed atomic operations as specified in section 8.1.1
+ * in Volume 3A of the Intel(R) Software Developer's Manual at
+ * http://www.intel.com/sdm.
+ */
+ struct pt_block_cache *bcache;
+
+ /* A pointer to the iscache attached to this section.
+ *
+ * The pointer is initialized when the iscache attaches and cleared when
+ * it detaches again. There can be at most one iscache attached to this
+ * section at any time.
+ *
+ * In addition to attaching, the iscache will need to obtain a reference
+ * to the section, which it needs to drop again after detaching.
+ */
+ struct pt_image_section_cache *iscache;
+
+ /* A pointer to the unmap function - NULL if the section is currently
+ * not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ int (*unmap)(struct pt_section *sec);
+
+ /* A pointer to the read function - NULL if the section is currently
+ * not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ int (*read)(const struct pt_section *sec, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+ /* A pointer to the memsize function - NULL if the section is currently
+ * not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ int (*memsize)(const struct pt_section *section, uint64_t *size);
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting this section.
+ *
+ * Most operations do not require the section to be locked. All
+ * actual locking should be handled by pt_section_* functions.
+ */
+ mtx_t lock;
+
+ /* A lock protecting the @iscache and @acount fields.
+ *
+ * We need separate locks to protect against a deadlock scenario when
+ * the iscache is mapping or unmapping this section.
+ *
+ * The attach lock must not be taken while holding the section lock; the
+ * other way round is OK.
+ */
+ mtx_t alock;
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The number of current users. The last user destroys the section. */
+ uint16_t ucount;
+
+ /* The number of attaches. This must be <= @ucount. */
+ uint16_t acount;
+
+ /* The number of current mappers. The last unmaps the section. */
+ uint16_t mcount;
+};
+
+/* Create a section.
+ *
+ * The returned section describes the contents of @file starting at @offset
+ * for @size bytes.
+ *
+ * If @file is shorter than the requested @size, the section is silently
+ * truncated to the size of @file.
+ *
+ * If @offset lies beyond the end of @file, no section is created.
+ *
+ * The returned section is not mapped and starts with a user count of one and
+ * instruction caching enabled.
+ *
+ * Returns a new section on success, NULL otherwise.
+ */
+extern struct pt_section *pt_mk_section(const char *file, uint64_t offset,
+ uint64_t size);
+
+/* Lock a section.
+ *
+ * Locks @section. The section must not be locked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_lock(struct pt_section *section);
+
+/* Unlock a section.
+ *
+ * Unlocks @section. The section must be locked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_unlock(struct pt_section *section);
+
+/* Add another user.
+ *
+ * Increments the user count of @section.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_overflow if the user count would overflow.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_get(struct pt_section *section);
+
+/* Remove a user.
+ *
+ * Decrements the user count of @section. Destroys the section if the
+ * count reaches zero.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if the user count is already zero.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_put(struct pt_section *section);
+
+/* Attaches the image section cache user.
+ *
+ * Similar to pt_section_get() but sets @section->iscache to @iscache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @iscache is NULL.
+ * Returns -pte_internal if a different cache is already attached.
+ * Returns -pte_overflow if the attach count would overflow.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+
+/* Detaches the image section cache user.
+ *
+ * Similar to pt_section_put() but clears @section->iscache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @iscache is NULL.
+ * Returns -pte_internal if the attach count is already zero.
+ * Returns -pte_internal if @section->iscache is not equal to @iscache.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+
+/* Return the filename of @section. */
+extern const char *pt_section_filename(const struct pt_section *section);
+
+/* Return the offset of the section in bytes. */
+extern uint64_t pt_section_offset(const struct pt_section *section);
+
+/* Return the size of the section in bytes. */
+extern uint64_t pt_section_size(const struct pt_section *section);
+
+/* Return the amount of memory currently used by the section in bytes.
+ *
+ * We only consider the amount of memory required for mapping @section; we
+ * ignore the size of the section object itself and the size of the status
+ * object.
+ *
+ * If @section is currently not mapped, the size is zero.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_internal if @size or @section is NULL.
+ */
+extern int pt_section_memsize(struct pt_section *section, uint64_t *size);
+
+/* Allocate a block cache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_nomem if the block cache can't be allocated.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_alloc_bcache(struct pt_section *section);
+
+/* Request block caching.
+ *
+ * The caller must ensure that @section is mapped.
+ */
+static inline int pt_section_request_bcache(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ if (section->bcache)
+ return 0;
+
+ return pt_section_alloc_bcache(section);
+}
+
+/* Return @section's block cache, if available.
+ *
+ * The caller must ensure that @section is mapped.
+ *
+ * The cache is not use-counted. It is only valid as long as the caller keeps
+ * @section mapped.
+ */
+static inline struct pt_block_cache *
+pt_section_bcache(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->bcache;
+}
+
+/* Create the OS-specific file status.
+ *
+ * On success, allocates a status object, provides a pointer to it in @pstatus
+ * and provides the file size in @psize.
+ *
+ * The status object will be free()'ed when its section is.
+ *
+ * This function is implemented in the OS-specific section implementation.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @pstatus, @psize, or @filename is NULL.
+ * Returns -pte_bad_image if @filename can't be opened.
+ * Returns -pte_nomem if the status object can't be allocated.
+ */
+extern int pt_section_mk_status(void **pstatus, uint64_t *psize,
+ const char *filename);
+
+/* Perform on-map maintenance work.
+ *
+ * Notifies an attached image section cache about the mapping of @section.
+ *
+ * This function is called by the OS-specific pt_section_map() implementation
+ * after @section has been successfully mapped and @section has been unlocked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_on_map_lock(struct pt_section *section);
+
+static inline int pt_section_on_map(struct pt_section *section)
+{
+ if (section && !section->iscache)
+ return 0;
+
+ return pt_section_on_map_lock(section);
+}
+
+/* Map a section.
+ *
+ * Maps @section into memory. Mappings are use-counted. The number of
+ * pt_section_map() calls must match the number of pt_section_unmap()
+ * calls.
+ *
+ * This function is implemented in the OS-specific section implementation.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_bad_image if @section changed or can't be opened.
+ * Returns -pte_bad_lock on any locking error.
+ * Returns -pte_nomem if @section can't be mapped into memory.
+ * Returns -pte_overflow if the map count would overflow.
+ */
+extern int pt_section_map(struct pt_section *section);
+
+/* Share a section mapping.
+ *
+ * Increases the map count for @section without notifying an attached image
+ * section cache.
+ *
+ * This function should only be used by the attached image section cache to
+ * resolve a deadlock scenario when mapping a section it intends to cache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section->mcount is zero.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_map_share(struct pt_section *section);
+
+/* Unmap a section.
+ *
+ * Unmaps @section from memory.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_section_unmap(struct pt_section *section);
+
+/* Read memory from a section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer. @section
+ * must be mapped.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+#endif /* PT_SECTION_H */
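A sketch of the use counting and mapping protocol described above: create a section, map it, read from it, and drop the references again. The file name and offsets are placeholders:

#include "pt_section.h"

static int example_section_read(uint8_t *buffer, uint16_t size)
{
	struct pt_section *section;
	int status, errcode;

	section = pt_mk_section("a.out", 0x1000ull, 0x2000ull);
	if (!section)
		return -pte_nomem;

	status = pt_section_map(section);
	if (status < 0)
		goto out_put;

	/* Read @size bytes from the beginning of the section. */
	status = pt_section_read(section, buffer, size, 0ull);

	errcode = pt_section_unmap(section);
	if (errcode < 0)
		status = errcode;

out_put:
	(void) pt_section_put(section);
	return status;
}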
diff --git a/libipt/internal/include/pt_section_file.h b/libipt/internal/include/pt_section_file.h
new file mode 100644
index 000000000000..9b266dbba0f3
--- /dev/null
+++ b/libipt/internal/include/pt_section_file.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_FILE_H
+#define PT_SECTION_FILE_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+struct pt_section;
+
+
+/* File-based section mapping information. */
+struct pt_sec_file_mapping {
+ /* The FILE pointer. */
+ FILE *file;
+
+ /* The begin and end of the section as offset into @file. */
+ long begin, end;
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting read access to this file.
+ *
+	 * Since we need to first set the file position indicator before
+ * we can read, there's a race on the file position.
+ */
+ mtx_t lock;
+#endif /* defined(FEATURE_THREADS) */
+};
+
+
+/* Map a section based on file operations.
+ *
+ * The caller has already opened the file for reading.
+ *
+ * On success, sets @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @file are NULL.
+ * Returns -pte_invalid if @section can't be mapped.
+ */
+extern int pt_sec_file_map(struct pt_section *section, FILE *file);
+
+/* Unmap a section based on file operations.
+ *
+ * On success, clears @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_file_unmap(struct pt_section *section);
+
+/* Read memory from a file based section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_invalid if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_sec_file_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+/* Compute the memory size of a section based on file operations.
+ *
+ * On success, provides the amount of memory used for mapping @section in bytes
+ * in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @size is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_file_memsize(const struct pt_section *section,
+ uint64_t *size);
+
+#endif /* PT_SECTION_FILE_H */
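A sketch of how the file-based fallback is set up, assuming (as the unmap counterpart suggests) that the mapping takes ownership of the FILE on success; the fopen() handling is illustrative:

#include "pt_section_file.h"

#include <stdio.h>

#include "intel-pt.h"

static int example_file_map(struct pt_section *section, const char *filename)
{
	FILE *file;
	int errcode;

	file = fopen(filename, "rb");
	if (!file)
		return -pte_bad_image;

	errcode = pt_sec_file_map(section, file);
	if (errcode < 0)
		fclose(file);

	return errcode;
}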
diff --git a/libipt/internal/include/pt_sync.h b/libipt/internal/include/pt_sync.h
new file mode 100644
index 000000000000..8e0c5d527aa1
--- /dev/null
+++ b/libipt/internal/include/pt_sync.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SYNC_H
+#define PT_SYNC_H
+
+#include <stdint.h>
+
+struct pt_config;
+
+
+/* Synchronize onto the trace stream.
+ *
+ * Search for the next synchronization point in forward or backward direction
+ * starting at @pos using the trace configuration @config.
+ *
+ * On success, stores a pointer to the next synchronization point in @sync.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_internal if @sync, @pos, or @config is NULL.
+ * Returns -pte_nosync if @pos lies outside of @config's buffer.
+ * Returns -pte_eos if no further synchronization point is found.
+ */
+extern int pt_sync_forward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_sync_backward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config);
+
+/* Manually synchronize onto the trace stream.
+ *
+ * Validate that @pos is within the bounds of @config's trace buffer and that
+ * there is a synchronization point at @pos.
+ *
+ * On success, stores @pos in @sync.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if @pos is outside of @config's trace buffer.
+ * Returns -pte_internal if @sync, @pos, or @config is NULL.
+ * Returns -pte_bad_packet if there is no PSB at @pos.
+ */
+extern int pt_sync_set(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config);
+
+#endif /* PT_SYNC_H */
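A sketch of the forward search above: walk the trace buffer and count synchronization points, skipping past each found PSB packet (ptps_psb from pt_opcodes.h) so the search makes progress. The helper name is illustrative:

#include "pt_sync.h"
#include "pt_opcodes.h"

#include "intel-pt.h"

static int example_count_psbs(const struct pt_config *config)
{
	const uint8_t *pos;
	int count, errcode;

	if (!config)
		return -pte_internal;

	pos = config->begin;
	for (count = 0; ; count += 1) {
		errcode = pt_sync_forward(&pos, pos, config);
		if (errcode < 0)
			break;

		/* Skip the PSB we just found. */
		pos += ptps_psb;
	}

	/* The loop normally ends with -pte_eos at the end of the buffer. */
	return count;
}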
diff --git a/libipt/internal/include/pt_time.h b/libipt/internal/include/pt_time.h
new file mode 100644
index 000000000000..1f3816ae163a
--- /dev/null
+++ b/libipt/internal/include/pt_time.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_TIME_H
+#define PT_TIME_H
+
+#include <stdint.h>
+
+struct pt_config;
+struct pt_packet_tsc;
+struct pt_packet_cbr;
+struct pt_packet_tma;
+struct pt_packet_mtc;
+struct pt_packet_cyc;
+
+
+/* Intel(R) Processor Trace timing. */
+struct pt_time {
+ /* The estimated Time Stamp Count. */
+ uint64_t tsc;
+
+ /* The base Time Stamp Count (from TSC and MTC). */
+ uint64_t base;
+
+ /* The estimated Fast Counter. */
+ uint64_t fc;
+
+ /* The adjusted last CTC value (from MTC and TMA). */
+ uint32_t ctc;
+
+ /* The adjusted CTC value when @fc was cleared (from MTC and TMA). */
+ uint32_t ctc_cyc;
+
+ /* The number of lost MTC updates. */
+ uint32_t lost_mtc;
+
+ /* The number of lost CYC updates. */
+ uint32_t lost_cyc;
+
+ /* The core:bus ratio. */
+ uint8_t cbr;
+
+ /* A flag saying whether we have seen a TSC packet. */
+ uint32_t have_tsc:1;
+
+ /* A flag saying whether we have seen a CBR packet. */
+ uint32_t have_cbr:1;
+
+ /* A flag saying whether we have seen a TMA packet. */
+ uint32_t have_tma:1;
+
+ /* A flag saying whether we have seen a MTC packet. */
+ uint32_t have_mtc:1;
+};
+
+/* Initialize (or reset) the time. */
+extern void pt_time_init(struct pt_time *time);
+
+/* Query the current time.
+ *
+ * Provides the estimated Time Stamp Count value in @tsc.
+ *
+ * If @lost_mtc is not NULL, provides the number of lost MTC packets.
+ * If @lost_cyc is not NULL, provides the number of lost CYC packets.
+ *
+ * Returns zero on success; a negative error code, otherwise.
+ * Returns -pte_internal if @tsc or @time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc,
+ uint32_t *lost_cyc, const struct pt_time *time);
+
+/* Query the current core:bus ratio.
+ *
+ * Provides the core:bus ratio in @cbr.
+ *
+ * Returns zero on success; a negative error code, otherwise.
+ * Returns -pte_internal if @cbr or @time is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time);
+
+/* Update the time based on an Intel PT packet.
+ *
+ * Returns zero on success.
+ * Returns a negative error code, otherwise.
+ */
+extern int pt_time_update_tsc(struct pt_time *, const struct pt_packet_tsc *,
+ const struct pt_config *);
+extern int pt_time_update_cbr(struct pt_time *, const struct pt_packet_cbr *,
+ const struct pt_config *);
+extern int pt_time_update_tma(struct pt_time *, const struct pt_packet_tma *,
+ const struct pt_config *);
+extern int pt_time_update_mtc(struct pt_time *, const struct pt_packet_mtc *,
+ const struct pt_config *);
+/* @fcr is the fast-counter:cycles ratio obtained by calibration. */
+extern int pt_time_update_cyc(struct pt_time *, const struct pt_packet_cyc *,
+ const struct pt_config *, uint64_t fcr);
+
+
+/* Timing calibration.
+ *
+ * Used for estimating the Fast-Counter:Cycles ratio.
+ *
+ * Ideally, we calibrate by counting CYCs between MTCs. Lacking MTCs, we
+ * use TSC, instead.
+ */
+struct pt_time_cal {
+ /* The estimated fast-counter:cycles ratio. */
+ uint64_t fcr;
+
+ /* The minimal and maximal @fcr values. */
+ uint64_t min_fcr, max_fcr;
+
+ /* The last TSC value.
+ *
+ * Used for calibrating at TSC.
+ */
+ uint64_t tsc;
+
+ /* The number of cycles since the last TSC (from CYC).
+ *
+ * Used for calibrating at TSC.
+ */
+ uint64_t cyc_tsc;
+
+ /* The number of cycles since the last MTC (from CYC).
+ *
+ * Used for calibrating at MTC.
+ */
+ uint64_t cyc_mtc;
+
+ /* The adjusted last CTC value (from MTC).
+ *
+ * Used for calibrating at MTC.
+ */
+ uint32_t ctc;
+
+ /* The number of lost MTC updates since the last successful update. */
+ uint32_t lost_mtc;
+
+ /* A flag saying whether we have seen an MTC packet. */
+ uint32_t have_mtc:1;
+};
+
+enum {
+ /* The amount by which the fcr value is right-shifted.
+ *
+ * Do not shift the value obtained by pt_tcal_fcr() when passing it to
+ * pt_time_update_cyc().
+ * Do shift the value passed to pt_tcal_set_fcr().
+ */
+ pt_tcal_fcr_shr = 8
+};
+
+/* Initialize (or reset) timing calibration. */
+extern void pt_tcal_init(struct pt_time_cal *tcal);
+
+/* Query the estimated fast-counter:cycles ratio.
+ *
+ * Provides the estimated ratio in @fcr unless -pte_internal or
+ * -pte_no_time is returned.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @fcr or @tcal is NULL.
+ * Returns -pte_no_time if no information is available.
+ */
+extern int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal);
+
+/* Set the fast-counter:cycles ratio.
+ *
+ * Timing calibration takes one CBR or two MTC packets before it can provide
+ * a first estimate. Use this to supply an initial value to be used in the
+ * meantime.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @tcal is NULL.
+ */
+extern int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr);
+
+/* Update calibration based on an Intel PT packet.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_tcal_update_tsc(struct pt_time_cal *,
+ const struct pt_packet_tsc *,
+ const struct pt_config *);
+extern int pt_tcal_header_tsc(struct pt_time_cal *,
+ const struct pt_packet_tsc *,
+ const struct pt_config *);
+extern int pt_tcal_update_cbr(struct pt_time_cal *,
+ const struct pt_packet_cbr *,
+ const struct pt_config *);
+extern int pt_tcal_header_cbr(struct pt_time_cal *,
+ const struct pt_packet_cbr *,
+ const struct pt_config *);
+extern int pt_tcal_update_tma(struct pt_time_cal *,
+ const struct pt_packet_tma *,
+ const struct pt_config *);
+extern int pt_tcal_update_mtc(struct pt_time_cal *,
+ const struct pt_packet_mtc *,
+ const struct pt_config *);
+extern int pt_tcal_update_cyc(struct pt_time_cal *,
+ const struct pt_packet_cyc *,
+ const struct pt_config *);
+
+#endif /* PT_TIME_H */
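A minimal usage sketch (not part of this change), showing how the timing and calibration interfaces declared above are meant to interact: calibration is updated from a CYC packet, the resulting fast-counter:cycles ratio is passed unshifted to pt_time_update_cyc() as the pt_tcal_fcr_shr comment requires, and the estimated TSC is then queried. The function name and calling context are invented for illustration; only the pt_time_*() and pt_tcal_*() declarations above are assumed.

static int example_process_cyc(struct pt_time *time, struct pt_time_cal *tcal,
			       const struct pt_packet_cyc *packet,
			       const struct pt_config *config)
{
	uint64_t fcr, tsc;
	uint32_t lost_mtc, lost_cyc;
	int errcode;

	/* Keep the calibration up to date before consuming the packet. */
	errcode = pt_tcal_update_cyc(tcal, packet, config);
	if (errcode < 0)
		return errcode;

	/* The ratio is already scaled by pt_tcal_fcr_shr; pass it on as-is. */
	errcode = pt_tcal_fcr(&fcr, tcal);
	if (errcode < 0)
		return errcode;

	errcode = pt_time_update_cyc(time, packet, config, fcr);
	if (errcode < 0)
		return errcode;

	/* Fails with -pte_no_time until a TSC packet has been seen. */
	return pt_time_query_tsc(&tsc, &lost_mtc, &lost_cyc, time);
}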
diff --git a/libipt/internal/include/pt_tnt_cache.h b/libipt/internal/include/pt_tnt_cache.h
new file mode 100644
index 000000000000..67d2b3798de1
--- /dev/null
+++ b/libipt/internal/include/pt_tnt_cache.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_TNT_CACHE_H
+#define PT_TNT_CACHE_H
+
+#include <stdint.h>
+
+struct pt_packet_tnt;
+struct pt_config;
+
+
+/* Keeping track of tnt indicators. */
+struct pt_tnt_cache {
+ /* The last tnt. */
+ uint64_t tnt;
+
+ /* The index into the above tnt.
+ *
+ * (tnt & index) gives the current tnt entry.
+ * (index >>= 1) moves the index to the next tnt entry.
+ * (index == 0) means that the current tnt is empty.
+ */
+ uint64_t index;
+};
+
+
+/* Initialize (or reset) the tnt cache. */
+extern void pt_tnt_cache_init(struct pt_tnt_cache *cache);
+
+/* Check if the tnt cache is empty.
+ *
+ * Returns 0 if the tnt cache is not empty.
+ * Returns > 0 if the tnt cache is empty.
+ * Returns -pte_invalid if @cache is NULL.
+ */
+extern int pt_tnt_cache_is_empty(const struct pt_tnt_cache *cache);
+
+/* Query the next tnt indicator.
+ *
+ * This consumes the returned tnt indicator in the cache.
+ *
+ * Returns 0 if the next branch is not taken.
+ * Returns > 0 if the next branch is taken.
+ * Returns -pte_invalid if @cache is NULL.
+ * Returns -pte_bad_query if there is no tnt cached.
+ */
+extern int pt_tnt_cache_query(struct pt_tnt_cache *cache);
+
+/* Update the tnt cache based on Intel PT packets.
+ *
+ * Updates @cache based on @packet and, if non-null, @config.
+ *
+ * Returns zero on success.
+ * Returns -pte_invalid if @cache or @packet is NULL.
+ * Returns -pte_bad_packet if @packet appears to be corrupted.
+ * Returns -pte_bad_context if the tnt cache is not empty.
+ */
+extern int pt_tnt_cache_update_tnt(struct pt_tnt_cache *cache,
+ const struct pt_packet_tnt *packet,
+ const struct pt_config *config);
+
+#endif /* PT_TNT_CACHE_H */
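An illustrative sketch (not part of this change) of the index scheme documented in struct pt_tnt_cache above; it mirrors what pt_tnt_cache_query() is described to do. The error constants are the ones named in the comments above; the function name is invented.

static int example_tnt_query(struct pt_tnt_cache *cache)
{
	int taken;

	if (!cache)
		return -pte_invalid;

	/* An empty cache has no index bit left to consume. */
	if (!cache->index)
		return -pte_bad_query;

	/* (tnt & index) selects the current taken/not-taken entry ... */
	taken = (cache->tnt & cache->index) != 0;

	/* ... and (index >>= 1) advances to the next entry. */
	cache->index >>= 1;

	return taken;
}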
diff --git a/libipt/internal/include/pti-disp-defs.h b/libipt/internal/include/pti-disp-defs.h
new file mode 100644
index 000000000000..82a6d04245b7
--- /dev/null
+++ b/libipt/internal/include/pti-disp-defs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PTI_DISP_DEFS_H)
+#define PTI_DISP_DEFS_H
+
+#define PTI_DISP_NONE 0
+#define PTI_PRESERVE_DEFAULT 1
+#define PTI_BRDISP8 2
+#define PTI_DISP_BUCKET_0_l1 3
+#define PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2 4
+#define PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2 5
+#define PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1 6
+#endif
diff --git a/libipt/internal/include/pti-disp.h b/libipt/internal/include/pti-disp.h
new file mode 100644
index 000000000000..99e97a334230
--- /dev/null
+++ b/libipt/internal/include/pti-disp.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static uint8_t disp_bytes_map_0x0[256] = {
+/*opcode 0x0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x11*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x12*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x13*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x14*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x15*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x16*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x17*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x18*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x19*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x20*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x21*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x22*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x23*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x24*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x25*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x28*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x29*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2e*/ 0,
+/*opcode 0x2f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x30*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x31*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x32*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x33*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x34*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x35*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x38*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x39*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x40*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x41*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x42*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x43*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x44*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x45*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x46*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x47*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x48*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x49*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x50*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x51*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x52*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x53*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x54*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x55*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x56*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x57*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x58*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x59*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x60*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x61*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x62*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x63*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x64*/ 0,
+/*opcode 0x65*/ 0,
+/*opcode 0x66*/ 0,
+/*opcode 0x67*/ 0,
+/*opcode 0x68*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x69*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x70*/ PTI_BRDISP8,
+/*opcode 0x71*/ PTI_BRDISP8,
+/*opcode 0x72*/ PTI_BRDISP8,
+/*opcode 0x73*/ PTI_BRDISP8,
+/*opcode 0x74*/ PTI_BRDISP8,
+/*opcode 0x75*/ PTI_BRDISP8,
+/*opcode 0x76*/ PTI_BRDISP8,
+/*opcode 0x77*/ PTI_BRDISP8,
+/*opcode 0x78*/ PTI_BRDISP8,
+/*opcode 0x79*/ PTI_BRDISP8,
+/*opcode 0x7a*/ PTI_BRDISP8,
+/*opcode 0x7b*/ PTI_BRDISP8,
+/*opcode 0x7c*/ PTI_BRDISP8,
+/*opcode 0x7d*/ PTI_BRDISP8,
+/*opcode 0x7e*/ PTI_BRDISP8,
+/*opcode 0x7f*/ PTI_BRDISP8,
+/*opcode 0x80*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x81*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x82*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x83*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x84*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x85*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x86*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x87*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x88*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x89*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x90*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x91*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x92*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x93*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x94*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x95*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x96*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x97*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x98*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x99*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9a*/ PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x9b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa0*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa1*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa2*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa3*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xab*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xac*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xad*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xae*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xba*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc7*/ PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1,
+/*opcode 0xc8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xca*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xce*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xda*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xde*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe0*/ PTI_BRDISP8,
+/*opcode 0xe1*/ PTI_BRDISP8,
+/*opcode 0xe2*/ PTI_BRDISP8,
+/*opcode 0xe3*/ PTI_BRDISP8,
+/*opcode 0xe4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe8*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0xe9*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0xea*/ PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xeb*/ PTI_BRDISP8,
+/*opcode 0xec*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xed*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xee*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xef*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf0*/ 0,
+/*opcode 0xf1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf2*/ 0,
+/*opcode 0xf3*/ 0,
+/*opcode 0xf4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xff*/ PTI_PRESERVE_DEFAULT,
+};
+static uint8_t disp_bytes_map_0x0F[256] = {
+/*opcode 0x0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4*/ 0,
+/*opcode 0x5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa*/ 0,
+/*opcode 0xb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc*/ 0,
+/*opcode 0xd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x11*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x12*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x13*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x14*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x15*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x16*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x17*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x18*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x19*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x20*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x21*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x22*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x23*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x24*/ 0,
+/*opcode 0x25*/ 0,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ 0,
+/*opcode 0x28*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x29*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x30*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x31*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x32*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x33*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x34*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x35*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x38*/ 0,
+/*opcode 0x39*/ 0,
+/*opcode 0x3a*/ 0,
+/*opcode 0x3b*/ 0,
+/*opcode 0x3c*/ 0,
+/*opcode 0x3d*/ 0,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ 0,
+/*opcode 0x40*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x41*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x42*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x43*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x44*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x45*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x46*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x47*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x48*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x49*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x50*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x51*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x52*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x53*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x54*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x55*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x56*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x57*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x58*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x59*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x60*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x61*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x62*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x63*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x64*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x65*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x66*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x67*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x68*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x69*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x70*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x71*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x72*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x73*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x74*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x75*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x76*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x77*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x78*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x79*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x80*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x81*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x82*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x83*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x84*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x85*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x86*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x87*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x88*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x89*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8a*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8b*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8c*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8d*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8e*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8f*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x90*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x91*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x92*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x93*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x94*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x95*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x96*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x97*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x98*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x99*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa6*/ 0,
+/*opcode 0xa7*/ 0,
+/*opcode 0xa8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xab*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xac*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xad*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xae*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb9*/ 0,
+/*opcode 0xba*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xca*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xce*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xda*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xde*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xea*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xeb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xec*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xed*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xee*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xef*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xff*/ 0,
+};
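A hedged sketch (not part of this change) of how these displacement tables are presumably consumed by the instruction length decoder: the first table is indexed by a one-byte opcode, the second by the opcode byte following a 0x0F escape. The map selector and the function name are assumptions for illustration only.

static uint8_t example_disp_class(int in_0x0F_map, uint8_t opcode)
{
	/* Pick the table matching the opcode map the decoder is in. */
	if (in_0x0F_map)
		return disp_bytes_map_0x0F[opcode];

	return disp_bytes_map_0x0[opcode];
}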
diff --git a/libipt/internal/include/pti-imm-defs.h b/libipt/internal/include/pti-imm-defs.h
new file mode 100644
index 000000000000..598c335638b8
--- /dev/null
+++ b/libipt/internal/include/pti-imm-defs.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PTI_IMM_DEFS_H)
+#define PTI_IMM_DEFS_H
+
+#define PTI_IMM_NONE 0
+#define PTI_0_IMM_WIDTH_CONST_l2 1
+#define PTI_UIMM8_IMM_WIDTH_CONST_l2 2
+#define PTI_SIMM8_IMM_WIDTH_CONST_l2 3
+#define PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2 4
+#define PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2 5
+#define PTI_UIMM16_IMM_WIDTH_CONST_l2 6
+#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1 7
+#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1 8
+#define PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2 9
+#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1 10
+#define PTI_IMM_hasimm_map0x0_op0xc8_l1 11
+#define PTI_IMM_hasimm_map0x0F_op0x78_l1 12
+
+#endif
diff --git a/libipt/internal/include/pti-imm.h b/libipt/internal/include/pti-imm.h
new file mode 100644
index 000000000000..7f3ceab2b307
--- /dev/null
+++ b/libipt/internal/include/pti-imm.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static uint8_t imm_bytes_map_0x0[256] = {
+/*opcode 0x0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x5*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xd*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x11*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x12*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x13*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x14*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x15*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x16*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x17*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x18*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x19*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x1d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x1e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x20*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x21*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x22*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x23*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x24*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x25*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x28*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x29*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x2d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x2e*/ 0,
+/*opcode 0x2f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x30*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x31*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x32*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x33*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x34*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x35*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x38*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x39*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x3d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x40*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x41*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x42*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x43*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x44*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x45*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x46*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x47*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x48*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x49*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x50*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x51*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x52*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x53*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x54*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x55*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x56*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x57*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x58*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x59*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x60*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x61*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x62*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x63*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x64*/ 0,
+/*opcode 0x65*/ 0,
+/*opcode 0x66*/ 0,
+/*opcode 0x67*/ 0,
+/*opcode 0x68*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2,
+/*opcode 0x69*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x6a*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x6b*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x6c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x70*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x71*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x72*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x73*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x74*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x75*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x76*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x77*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x78*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x79*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x80*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x81*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x82*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x83*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x84*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x85*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x86*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x87*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x88*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x89*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x90*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x91*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x92*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x93*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x94*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x95*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x96*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x97*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x98*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x99*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9a*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0x9b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa8*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xa9*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xaa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xab*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xac*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xad*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xae*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xaf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb0*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb1*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb2*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb3*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb7*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb8*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xb9*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xba*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbb*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbc*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbd*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbe*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbf*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xc0*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc1*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc2*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0xc3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc7*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1,
+/*opcode 0xc8*/ PTI_IMM_hasimm_map0x0_op0xc8_l1,
+/*opcode 0xc9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xca*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0xcb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcd*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xce*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd4*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xd5*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xd6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xda*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xde*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe7*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xea*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0xeb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xec*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xed*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xee*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xef*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf0*/ 0,
+/*opcode 0xf1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf2*/ 0,
+/*opcode 0xf3*/ 0,
+/*opcode 0xf4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf6*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1,
+/*opcode 0xf7*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1,
+/*opcode 0xf8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xff*/ PTI_0_IMM_WIDTH_CONST_l2,
+};
+static uint8_t imm_bytes_map_0x0F[256] = {
+/*opcode 0x0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4*/ 0,
+/*opcode 0x5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa*/ 0,
+/*opcode 0xb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc*/ 0,
+/*opcode 0xd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x11*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x12*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x13*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x14*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x15*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x16*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x17*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x18*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x19*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x20*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x21*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x22*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x23*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x24*/ 0,
+/*opcode 0x25*/ 0,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ 0,
+/*opcode 0x28*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x29*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x30*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x31*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x32*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x33*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x34*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x35*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x38*/ 0,
+/*opcode 0x39*/ 0,
+/*opcode 0x3a*/ 0,
+/*opcode 0x3b*/ 0,
+/*opcode 0x3c*/ 0,
+/*opcode 0x3d*/ 0,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ 0,
+/*opcode 0x40*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x41*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x42*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x43*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x44*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x45*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x46*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x47*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x48*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x49*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x50*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x51*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x52*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x53*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x54*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x55*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x56*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x57*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x58*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x59*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x60*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x61*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x62*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x63*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x64*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x65*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x66*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x67*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x68*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x69*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x70*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x71*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x72*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x73*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x74*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x75*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x76*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x77*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x78*/ PTI_IMM_hasimm_map0x0F_op0x78_l1,
+/*opcode 0x79*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x80*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x81*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x82*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x83*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x84*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x85*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x86*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x87*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x88*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x89*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x90*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x91*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x92*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x93*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x94*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x95*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x96*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x97*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x98*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x99*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xa5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa6*/ 0,
+/*opcode 0xa7*/ 0,
+/*opcode 0xa8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xaa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xab*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xac*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xad*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xae*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xaf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb9*/ 0,
+/*opcode 0xba*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xbb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc2*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xca*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xce*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xda*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xde*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xea*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xeb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xec*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xed*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xee*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xef*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xff*/ 0,
+};
diff --git a/libipt/internal/include/pti-modrm-defs.h b/libipt/internal/include/pti-modrm-defs.h
new file mode 100644
index 000000000000..fd75618ce172
--- /dev/null
+++ b/libipt/internal/include/pti-modrm-defs.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PTI_MODRM_DEFS_H)
+#define PTI_MODRM_DEFS_H
+
+
+#define PTI_MODRM_FALSE 0
+#define PTI_MODRM_TRUE 1
+#define PTI_MODRM_IGNORE_MOD 2
+#define PTI_MODRM_UNDEF 3
+
+#endif
diff --git a/libipt/internal/include/pti-modrm.h b/libipt/internal/include/pti-modrm.h
new file mode 100644
index 000000000000..ddddf63d29db
--- /dev/null
+++ b/libipt/internal/include/pti-modrm.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static uint8_t has_modrm_map_0x0[256] = {
+/*opcode 0x0*/ PTI_MODRM_TRUE,
+/*opcode 0x1*/ PTI_MODRM_TRUE,
+/*opcode 0x2*/ PTI_MODRM_TRUE,
+/*opcode 0x3*/ PTI_MODRM_TRUE,
+/*opcode 0x4*/ PTI_MODRM_FALSE,
+/*opcode 0x5*/ PTI_MODRM_FALSE,
+/*opcode 0x6*/ PTI_MODRM_FALSE,
+/*opcode 0x7*/ PTI_MODRM_FALSE,
+/*opcode 0x8*/ PTI_MODRM_TRUE,
+/*opcode 0x9*/ PTI_MODRM_TRUE,
+/*opcode 0xa*/ PTI_MODRM_TRUE,
+/*opcode 0xb*/ PTI_MODRM_TRUE,
+/*opcode 0xc*/ PTI_MODRM_FALSE,
+/*opcode 0xd*/ PTI_MODRM_FALSE,
+/*opcode 0xe*/ PTI_MODRM_FALSE,
+/*opcode 0xf*/ PTI_MODRM_UNDEF,
+/*opcode 0x10*/ PTI_MODRM_TRUE,
+/*opcode 0x11*/ PTI_MODRM_TRUE,
+/*opcode 0x12*/ PTI_MODRM_TRUE,
+/*opcode 0x13*/ PTI_MODRM_TRUE,
+/*opcode 0x14*/ PTI_MODRM_FALSE,
+/*opcode 0x15*/ PTI_MODRM_FALSE,
+/*opcode 0x16*/ PTI_MODRM_FALSE,
+/*opcode 0x17*/ PTI_MODRM_FALSE,
+/*opcode 0x18*/ PTI_MODRM_TRUE,
+/*opcode 0x19*/ PTI_MODRM_TRUE,
+/*opcode 0x1a*/ PTI_MODRM_TRUE,
+/*opcode 0x1b*/ PTI_MODRM_TRUE,
+/*opcode 0x1c*/ PTI_MODRM_FALSE,
+/*opcode 0x1d*/ PTI_MODRM_FALSE,
+/*opcode 0x1e*/ PTI_MODRM_FALSE,
+/*opcode 0x1f*/ PTI_MODRM_FALSE,
+/*opcode 0x20*/ PTI_MODRM_TRUE,
+/*opcode 0x21*/ PTI_MODRM_TRUE,
+/*opcode 0x22*/ PTI_MODRM_TRUE,
+/*opcode 0x23*/ PTI_MODRM_TRUE,
+/*opcode 0x24*/ PTI_MODRM_FALSE,
+/*opcode 0x25*/ PTI_MODRM_FALSE,
+/*opcode 0x26*/ PTI_MODRM_UNDEF,
+/*opcode 0x27*/ PTI_MODRM_FALSE,
+/*opcode 0x28*/ PTI_MODRM_TRUE,
+/*opcode 0x29*/ PTI_MODRM_TRUE,
+/*opcode 0x2a*/ PTI_MODRM_TRUE,
+/*opcode 0x2b*/ PTI_MODRM_TRUE,
+/*opcode 0x2c*/ PTI_MODRM_FALSE,
+/*opcode 0x2d*/ PTI_MODRM_FALSE,
+/*opcode 0x2e*/ PTI_MODRM_UNDEF,
+/*opcode 0x2f*/ PTI_MODRM_FALSE,
+/*opcode 0x30*/ PTI_MODRM_TRUE,
+/*opcode 0x31*/ PTI_MODRM_TRUE,
+/*opcode 0x32*/ PTI_MODRM_TRUE,
+/*opcode 0x33*/ PTI_MODRM_TRUE,
+/*opcode 0x34*/ PTI_MODRM_FALSE,
+/*opcode 0x35*/ PTI_MODRM_FALSE,
+/*opcode 0x36*/ PTI_MODRM_UNDEF,
+/*opcode 0x37*/ PTI_MODRM_FALSE,
+/*opcode 0x38*/ PTI_MODRM_TRUE,
+/*opcode 0x39*/ PTI_MODRM_TRUE,
+/*opcode 0x3a*/ PTI_MODRM_TRUE,
+/*opcode 0x3b*/ PTI_MODRM_TRUE,
+/*opcode 0x3c*/ PTI_MODRM_FALSE,
+/*opcode 0x3d*/ PTI_MODRM_FALSE,
+/*opcode 0x3e*/ PTI_MODRM_UNDEF,
+/*opcode 0x3f*/ PTI_MODRM_FALSE,
+/*opcode 0x40*/ PTI_MODRM_FALSE,
+/*opcode 0x41*/ PTI_MODRM_FALSE,
+/*opcode 0x42*/ PTI_MODRM_FALSE,
+/*opcode 0x43*/ PTI_MODRM_FALSE,
+/*opcode 0x44*/ PTI_MODRM_FALSE,
+/*opcode 0x45*/ PTI_MODRM_FALSE,
+/*opcode 0x46*/ PTI_MODRM_FALSE,
+/*opcode 0x47*/ PTI_MODRM_FALSE,
+/*opcode 0x48*/ PTI_MODRM_FALSE,
+/*opcode 0x49*/ PTI_MODRM_FALSE,
+/*opcode 0x4a*/ PTI_MODRM_FALSE,
+/*opcode 0x4b*/ PTI_MODRM_FALSE,
+/*opcode 0x4c*/ PTI_MODRM_FALSE,
+/*opcode 0x4d*/ PTI_MODRM_FALSE,
+/*opcode 0x4e*/ PTI_MODRM_FALSE,
+/*opcode 0x4f*/ PTI_MODRM_FALSE,
+/*opcode 0x50*/ PTI_MODRM_FALSE,
+/*opcode 0x51*/ PTI_MODRM_FALSE,
+/*opcode 0x52*/ PTI_MODRM_FALSE,
+/*opcode 0x53*/ PTI_MODRM_FALSE,
+/*opcode 0x54*/ PTI_MODRM_FALSE,
+/*opcode 0x55*/ PTI_MODRM_FALSE,
+/*opcode 0x56*/ PTI_MODRM_FALSE,
+/*opcode 0x57*/ PTI_MODRM_FALSE,
+/*opcode 0x58*/ PTI_MODRM_FALSE,
+/*opcode 0x59*/ PTI_MODRM_FALSE,
+/*opcode 0x5a*/ PTI_MODRM_FALSE,
+/*opcode 0x5b*/ PTI_MODRM_FALSE,
+/*opcode 0x5c*/ PTI_MODRM_FALSE,
+/*opcode 0x5d*/ PTI_MODRM_FALSE,
+/*opcode 0x5e*/ PTI_MODRM_FALSE,
+/*opcode 0x5f*/ PTI_MODRM_FALSE,
+/*opcode 0x60*/ PTI_MODRM_FALSE,
+/*opcode 0x61*/ PTI_MODRM_FALSE,
+/*opcode 0x62*/ PTI_MODRM_TRUE,
+/*opcode 0x63*/ PTI_MODRM_TRUE,
+/*opcode 0x64*/ PTI_MODRM_UNDEF,
+/*opcode 0x65*/ PTI_MODRM_UNDEF,
+/*opcode 0x66*/ PTI_MODRM_UNDEF,
+/*opcode 0x67*/ PTI_MODRM_UNDEF,
+/*opcode 0x68*/ PTI_MODRM_FALSE,
+/*opcode 0x69*/ PTI_MODRM_TRUE,
+/*opcode 0x6a*/ PTI_MODRM_FALSE,
+/*opcode 0x6b*/ PTI_MODRM_TRUE,
+/*opcode 0x6c*/ PTI_MODRM_FALSE,
+/*opcode 0x6d*/ PTI_MODRM_FALSE,
+/*opcode 0x6e*/ PTI_MODRM_FALSE,
+/*opcode 0x6f*/ PTI_MODRM_FALSE,
+/*opcode 0x70*/ PTI_MODRM_FALSE,
+/*opcode 0x71*/ PTI_MODRM_FALSE,
+/*opcode 0x72*/ PTI_MODRM_FALSE,
+/*opcode 0x73*/ PTI_MODRM_FALSE,
+/*opcode 0x74*/ PTI_MODRM_FALSE,
+/*opcode 0x75*/ PTI_MODRM_FALSE,
+/*opcode 0x76*/ PTI_MODRM_FALSE,
+/*opcode 0x77*/ PTI_MODRM_FALSE,
+/*opcode 0x78*/ PTI_MODRM_FALSE,
+/*opcode 0x79*/ PTI_MODRM_FALSE,
+/*opcode 0x7a*/ PTI_MODRM_FALSE,
+/*opcode 0x7b*/ PTI_MODRM_FALSE,
+/*opcode 0x7c*/ PTI_MODRM_FALSE,
+/*opcode 0x7d*/ PTI_MODRM_FALSE,
+/*opcode 0x7e*/ PTI_MODRM_FALSE,
+/*opcode 0x7f*/ PTI_MODRM_FALSE,
+/*opcode 0x80*/ PTI_MODRM_TRUE,
+/*opcode 0x81*/ PTI_MODRM_TRUE,
+/*opcode 0x82*/ PTI_MODRM_TRUE,
+/*opcode 0x83*/ PTI_MODRM_TRUE,
+/*opcode 0x84*/ PTI_MODRM_TRUE,
+/*opcode 0x85*/ PTI_MODRM_TRUE,
+/*opcode 0x86*/ PTI_MODRM_TRUE,
+/*opcode 0x87*/ PTI_MODRM_TRUE,
+/*opcode 0x88*/ PTI_MODRM_TRUE,
+/*opcode 0x89*/ PTI_MODRM_TRUE,
+/*opcode 0x8a*/ PTI_MODRM_TRUE,
+/*opcode 0x8b*/ PTI_MODRM_TRUE,
+/*opcode 0x8c*/ PTI_MODRM_TRUE,
+/*opcode 0x8d*/ PTI_MODRM_TRUE,
+/*opcode 0x8e*/ PTI_MODRM_TRUE,
+/*opcode 0x8f*/ PTI_MODRM_TRUE,
+/*opcode 0x90*/ PTI_MODRM_FALSE,
+/*opcode 0x91*/ PTI_MODRM_FALSE,
+/*opcode 0x92*/ PTI_MODRM_FALSE,
+/*opcode 0x93*/ PTI_MODRM_FALSE,
+/*opcode 0x94*/ PTI_MODRM_FALSE,
+/*opcode 0x95*/ PTI_MODRM_FALSE,
+/*opcode 0x96*/ PTI_MODRM_FALSE,
+/*opcode 0x97*/ PTI_MODRM_FALSE,
+/*opcode 0x98*/ PTI_MODRM_FALSE,
+/*opcode 0x99*/ PTI_MODRM_FALSE,
+/*opcode 0x9a*/ PTI_MODRM_FALSE,
+/*opcode 0x9b*/ PTI_MODRM_FALSE,
+/*opcode 0x9c*/ PTI_MODRM_FALSE,
+/*opcode 0x9d*/ PTI_MODRM_FALSE,
+/*opcode 0x9e*/ PTI_MODRM_FALSE,
+/*opcode 0x9f*/ PTI_MODRM_FALSE,
+/*opcode 0xa0*/ PTI_MODRM_FALSE,
+/*opcode 0xa1*/ PTI_MODRM_FALSE,
+/*opcode 0xa2*/ PTI_MODRM_FALSE,
+/*opcode 0xa3*/ PTI_MODRM_FALSE,
+/*opcode 0xa4*/ PTI_MODRM_FALSE,
+/*opcode 0xa5*/ PTI_MODRM_FALSE,
+/*opcode 0xa6*/ PTI_MODRM_FALSE,
+/*opcode 0xa7*/ PTI_MODRM_FALSE,
+/*opcode 0xa8*/ PTI_MODRM_FALSE,
+/*opcode 0xa9*/ PTI_MODRM_FALSE,
+/*opcode 0xaa*/ PTI_MODRM_FALSE,
+/*opcode 0xab*/ PTI_MODRM_FALSE,
+/*opcode 0xac*/ PTI_MODRM_FALSE,
+/*opcode 0xad*/ PTI_MODRM_FALSE,
+/*opcode 0xae*/ PTI_MODRM_FALSE,
+/*opcode 0xaf*/ PTI_MODRM_FALSE,
+/*opcode 0xb0*/ PTI_MODRM_FALSE,
+/*opcode 0xb1*/ PTI_MODRM_FALSE,
+/*opcode 0xb2*/ PTI_MODRM_FALSE,
+/*opcode 0xb3*/ PTI_MODRM_FALSE,
+/*opcode 0xb4*/ PTI_MODRM_FALSE,
+/*opcode 0xb5*/ PTI_MODRM_FALSE,
+/*opcode 0xb6*/ PTI_MODRM_FALSE,
+/*opcode 0xb7*/ PTI_MODRM_FALSE,
+/*opcode 0xb8*/ PTI_MODRM_FALSE,
+/*opcode 0xb9*/ PTI_MODRM_FALSE,
+/*opcode 0xba*/ PTI_MODRM_FALSE,
+/*opcode 0xbb*/ PTI_MODRM_FALSE,
+/*opcode 0xbc*/ PTI_MODRM_FALSE,
+/*opcode 0xbd*/ PTI_MODRM_FALSE,
+/*opcode 0xbe*/ PTI_MODRM_FALSE,
+/*opcode 0xbf*/ PTI_MODRM_FALSE,
+/*opcode 0xc0*/ PTI_MODRM_TRUE,
+/*opcode 0xc1*/ PTI_MODRM_TRUE,
+/*opcode 0xc2*/ PTI_MODRM_FALSE,
+/*opcode 0xc3*/ PTI_MODRM_FALSE,
+/*opcode 0xc4*/ PTI_MODRM_TRUE,
+/*opcode 0xc5*/ PTI_MODRM_TRUE,
+/*opcode 0xc6*/ PTI_MODRM_TRUE,
+/*opcode 0xc7*/ PTI_MODRM_TRUE,
+/*opcode 0xc8*/ PTI_MODRM_FALSE,
+/*opcode 0xc9*/ PTI_MODRM_FALSE,
+/*opcode 0xca*/ PTI_MODRM_FALSE,
+/*opcode 0xcb*/ PTI_MODRM_FALSE,
+/*opcode 0xcc*/ PTI_MODRM_FALSE,
+/*opcode 0xcd*/ PTI_MODRM_FALSE,
+/*opcode 0xce*/ PTI_MODRM_FALSE,
+/*opcode 0xcf*/ PTI_MODRM_FALSE,
+/*opcode 0xd0*/ PTI_MODRM_TRUE,
+/*opcode 0xd1*/ PTI_MODRM_TRUE,
+/*opcode 0xd2*/ PTI_MODRM_TRUE,
+/*opcode 0xd3*/ PTI_MODRM_TRUE,
+/*opcode 0xd4*/ PTI_MODRM_FALSE,
+/*opcode 0xd5*/ PTI_MODRM_FALSE,
+/*opcode 0xd6*/ PTI_MODRM_FALSE,
+/*opcode 0xd7*/ PTI_MODRM_FALSE,
+/*opcode 0xd8*/ PTI_MODRM_TRUE,
+/*opcode 0xd9*/ PTI_MODRM_TRUE,
+/*opcode 0xda*/ PTI_MODRM_TRUE,
+/*opcode 0xdb*/ PTI_MODRM_TRUE,
+/*opcode 0xdc*/ PTI_MODRM_TRUE,
+/*opcode 0xdd*/ PTI_MODRM_TRUE,
+/*opcode 0xde*/ PTI_MODRM_TRUE,
+/*opcode 0xdf*/ PTI_MODRM_TRUE,
+/*opcode 0xe0*/ PTI_MODRM_FALSE,
+/*opcode 0xe1*/ PTI_MODRM_FALSE,
+/*opcode 0xe2*/ PTI_MODRM_FALSE,
+/*opcode 0xe3*/ PTI_MODRM_FALSE,
+/*opcode 0xe4*/ PTI_MODRM_FALSE,
+/*opcode 0xe5*/ PTI_MODRM_FALSE,
+/*opcode 0xe6*/ PTI_MODRM_FALSE,
+/*opcode 0xe7*/ PTI_MODRM_FALSE,
+/*opcode 0xe8*/ PTI_MODRM_FALSE,
+/*opcode 0xe9*/ PTI_MODRM_FALSE,
+/*opcode 0xea*/ PTI_MODRM_FALSE,
+/*opcode 0xeb*/ PTI_MODRM_FALSE,
+/*opcode 0xec*/ PTI_MODRM_FALSE,
+/*opcode 0xed*/ PTI_MODRM_FALSE,
+/*opcode 0xee*/ PTI_MODRM_FALSE,
+/*opcode 0xef*/ PTI_MODRM_FALSE,
+/*opcode 0xf0*/ PTI_MODRM_UNDEF,
+/*opcode 0xf1*/ PTI_MODRM_FALSE,
+/*opcode 0xf2*/ PTI_MODRM_UNDEF,
+/*opcode 0xf3*/ PTI_MODRM_UNDEF,
+/*opcode 0xf4*/ PTI_MODRM_FALSE,
+/*opcode 0xf5*/ PTI_MODRM_FALSE,
+/*opcode 0xf6*/ PTI_MODRM_TRUE,
+/*opcode 0xf7*/ PTI_MODRM_TRUE,
+/*opcode 0xf8*/ PTI_MODRM_FALSE,
+/*opcode 0xf9*/ PTI_MODRM_FALSE,
+/*opcode 0xfa*/ PTI_MODRM_FALSE,
+/*opcode 0xfb*/ PTI_MODRM_FALSE,
+/*opcode 0xfc*/ PTI_MODRM_FALSE,
+/*opcode 0xfd*/ PTI_MODRM_FALSE,
+/*opcode 0xfe*/ PTI_MODRM_TRUE,
+/*opcode 0xff*/ PTI_MODRM_TRUE,
+};
+static uint8_t has_modrm_map_0x0F[256] = {
+/*opcode 0x0*/ PTI_MODRM_TRUE,
+/*opcode 0x1*/ PTI_MODRM_TRUE,
+/*opcode 0x2*/ PTI_MODRM_TRUE,
+/*opcode 0x3*/ PTI_MODRM_TRUE,
+/*opcode 0x4*/ PTI_MODRM_UNDEF,
+/*opcode 0x5*/ PTI_MODRM_FALSE,
+/*opcode 0x6*/ PTI_MODRM_FALSE,
+/*opcode 0x7*/ PTI_MODRM_FALSE,
+/*opcode 0x8*/ PTI_MODRM_FALSE,
+/*opcode 0x9*/ PTI_MODRM_FALSE,
+/*opcode 0xa*/ PTI_MODRM_UNDEF,
+/*opcode 0xb*/ PTI_MODRM_FALSE,
+/*opcode 0xc*/ PTI_MODRM_UNDEF,
+/*opcode 0xd*/ PTI_MODRM_TRUE,
+/*opcode 0xe*/ PTI_MODRM_FALSE,
+/*opcode 0xf*/ PTI_MODRM_UNDEF,
+/*opcode 0x10*/ PTI_MODRM_TRUE,
+/*opcode 0x11*/ PTI_MODRM_TRUE,
+/*opcode 0x12*/ PTI_MODRM_TRUE,
+/*opcode 0x13*/ PTI_MODRM_TRUE,
+/*opcode 0x14*/ PTI_MODRM_TRUE,
+/*opcode 0x15*/ PTI_MODRM_TRUE,
+/*opcode 0x16*/ PTI_MODRM_TRUE,
+/*opcode 0x17*/ PTI_MODRM_TRUE,
+/*opcode 0x18*/ PTI_MODRM_TRUE,
+/*opcode 0x19*/ PTI_MODRM_TRUE,
+/*opcode 0x1a*/ PTI_MODRM_TRUE,
+/*opcode 0x1b*/ PTI_MODRM_TRUE,
+/*opcode 0x1c*/ PTI_MODRM_TRUE,
+/*opcode 0x1d*/ PTI_MODRM_TRUE,
+/*opcode 0x1e*/ PTI_MODRM_TRUE,
+/*opcode 0x1f*/ PTI_MODRM_TRUE,
+/*opcode 0x20*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x21*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x22*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x23*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x24*/ PTI_MODRM_UNDEF,
+/*opcode 0x25*/ PTI_MODRM_UNDEF,
+/*opcode 0x26*/ PTI_MODRM_UNDEF,
+/*opcode 0x27*/ PTI_MODRM_UNDEF,
+/*opcode 0x28*/ PTI_MODRM_TRUE,
+/*opcode 0x29*/ PTI_MODRM_TRUE,
+/*opcode 0x2a*/ PTI_MODRM_TRUE,
+/*opcode 0x2b*/ PTI_MODRM_TRUE,
+/*opcode 0x2c*/ PTI_MODRM_TRUE,
+/*opcode 0x2d*/ PTI_MODRM_TRUE,
+/*opcode 0x2e*/ PTI_MODRM_TRUE,
+/*opcode 0x2f*/ PTI_MODRM_TRUE,
+/*opcode 0x30*/ PTI_MODRM_FALSE,
+/*opcode 0x31*/ PTI_MODRM_FALSE,
+/*opcode 0x32*/ PTI_MODRM_FALSE,
+/*opcode 0x33*/ PTI_MODRM_FALSE,
+/*opcode 0x34*/ PTI_MODRM_FALSE,
+/*opcode 0x35*/ PTI_MODRM_FALSE,
+/*opcode 0x36*/ PTI_MODRM_UNDEF,
+/*opcode 0x37*/ PTI_MODRM_FALSE,
+/*opcode 0x38*/ PTI_MODRM_UNDEF,
+/*opcode 0x39*/ PTI_MODRM_UNDEF,
+/*opcode 0x3a*/ PTI_MODRM_UNDEF,
+/*opcode 0x3b*/ PTI_MODRM_UNDEF,
+/*opcode 0x3c*/ PTI_MODRM_UNDEF,
+/*opcode 0x3d*/ PTI_MODRM_UNDEF,
+/*opcode 0x3e*/ PTI_MODRM_UNDEF,
+/*opcode 0x3f*/ PTI_MODRM_UNDEF,
+/*opcode 0x40*/ PTI_MODRM_TRUE,
+/*opcode 0x41*/ PTI_MODRM_TRUE,
+/*opcode 0x42*/ PTI_MODRM_TRUE,
+/*opcode 0x43*/ PTI_MODRM_TRUE,
+/*opcode 0x44*/ PTI_MODRM_TRUE,
+/*opcode 0x45*/ PTI_MODRM_TRUE,
+/*opcode 0x46*/ PTI_MODRM_TRUE,
+/*opcode 0x47*/ PTI_MODRM_TRUE,
+/*opcode 0x48*/ PTI_MODRM_TRUE,
+/*opcode 0x49*/ PTI_MODRM_TRUE,
+/*opcode 0x4a*/ PTI_MODRM_TRUE,
+/*opcode 0x4b*/ PTI_MODRM_TRUE,
+/*opcode 0x4c*/ PTI_MODRM_TRUE,
+/*opcode 0x4d*/ PTI_MODRM_TRUE,
+/*opcode 0x4e*/ PTI_MODRM_TRUE,
+/*opcode 0x4f*/ PTI_MODRM_TRUE,
+/*opcode 0x50*/ PTI_MODRM_TRUE,
+/*opcode 0x51*/ PTI_MODRM_TRUE,
+/*opcode 0x52*/ PTI_MODRM_TRUE,
+/*opcode 0x53*/ PTI_MODRM_TRUE,
+/*opcode 0x54*/ PTI_MODRM_TRUE,
+/*opcode 0x55*/ PTI_MODRM_TRUE,
+/*opcode 0x56*/ PTI_MODRM_TRUE,
+/*opcode 0x57*/ PTI_MODRM_TRUE,
+/*opcode 0x58*/ PTI_MODRM_TRUE,
+/*opcode 0x59*/ PTI_MODRM_TRUE,
+/*opcode 0x5a*/ PTI_MODRM_TRUE,
+/*opcode 0x5b*/ PTI_MODRM_TRUE,
+/*opcode 0x5c*/ PTI_MODRM_TRUE,
+/*opcode 0x5d*/ PTI_MODRM_TRUE,
+/*opcode 0x5e*/ PTI_MODRM_TRUE,
+/*opcode 0x5f*/ PTI_MODRM_TRUE,
+/*opcode 0x60*/ PTI_MODRM_TRUE,
+/*opcode 0x61*/ PTI_MODRM_TRUE,
+/*opcode 0x62*/ PTI_MODRM_TRUE,
+/*opcode 0x63*/ PTI_MODRM_TRUE,
+/*opcode 0x64*/ PTI_MODRM_TRUE,
+/*opcode 0x65*/ PTI_MODRM_TRUE,
+/*opcode 0x66*/ PTI_MODRM_TRUE,
+/*opcode 0x67*/ PTI_MODRM_TRUE,
+/*opcode 0x68*/ PTI_MODRM_TRUE,
+/*opcode 0x69*/ PTI_MODRM_TRUE,
+/*opcode 0x6a*/ PTI_MODRM_TRUE,
+/*opcode 0x6b*/ PTI_MODRM_TRUE,
+/*opcode 0x6c*/ PTI_MODRM_TRUE,
+/*opcode 0x6d*/ PTI_MODRM_TRUE,
+/*opcode 0x6e*/ PTI_MODRM_TRUE,
+/*opcode 0x6f*/ PTI_MODRM_TRUE,
+/*opcode 0x70*/ PTI_MODRM_TRUE,
+/*opcode 0x71*/ PTI_MODRM_TRUE,
+/*opcode 0x72*/ PTI_MODRM_TRUE,
+/*opcode 0x73*/ PTI_MODRM_TRUE,
+/*opcode 0x74*/ PTI_MODRM_TRUE,
+/*opcode 0x75*/ PTI_MODRM_TRUE,
+/*opcode 0x76*/ PTI_MODRM_TRUE,
+/*opcode 0x77*/ PTI_MODRM_FALSE,
+/*opcode 0x78*/ PTI_MODRM_TRUE,
+/*opcode 0x79*/ PTI_MODRM_TRUE,
+/*opcode 0x7a*/ PTI_MODRM_TRUE,
+/*opcode 0x7b*/ PTI_MODRM_TRUE,
+/*opcode 0x7c*/ PTI_MODRM_TRUE,
+/*opcode 0x7d*/ PTI_MODRM_TRUE,
+/*opcode 0x7e*/ PTI_MODRM_TRUE,
+/*opcode 0x7f*/ PTI_MODRM_TRUE,
+/*opcode 0x80*/ PTI_MODRM_FALSE,
+/*opcode 0x81*/ PTI_MODRM_FALSE,
+/*opcode 0x82*/ PTI_MODRM_FALSE,
+/*opcode 0x83*/ PTI_MODRM_FALSE,
+/*opcode 0x84*/ PTI_MODRM_FALSE,
+/*opcode 0x85*/ PTI_MODRM_FALSE,
+/*opcode 0x86*/ PTI_MODRM_FALSE,
+/*opcode 0x87*/ PTI_MODRM_FALSE,
+/*opcode 0x88*/ PTI_MODRM_FALSE,
+/*opcode 0x89*/ PTI_MODRM_FALSE,
+/*opcode 0x8a*/ PTI_MODRM_FALSE,
+/*opcode 0x8b*/ PTI_MODRM_FALSE,
+/*opcode 0x8c*/ PTI_MODRM_FALSE,
+/*opcode 0x8d*/ PTI_MODRM_FALSE,
+/*opcode 0x8e*/ PTI_MODRM_FALSE,
+/*opcode 0x8f*/ PTI_MODRM_FALSE,
+/*opcode 0x90*/ PTI_MODRM_TRUE,
+/*opcode 0x91*/ PTI_MODRM_TRUE,
+/*opcode 0x92*/ PTI_MODRM_TRUE,
+/*opcode 0x93*/ PTI_MODRM_TRUE,
+/*opcode 0x94*/ PTI_MODRM_TRUE,
+/*opcode 0x95*/ PTI_MODRM_TRUE,
+/*opcode 0x96*/ PTI_MODRM_TRUE,
+/*opcode 0x97*/ PTI_MODRM_TRUE,
+/*opcode 0x98*/ PTI_MODRM_TRUE,
+/*opcode 0x99*/ PTI_MODRM_TRUE,
+/*opcode 0x9a*/ PTI_MODRM_TRUE,
+/*opcode 0x9b*/ PTI_MODRM_TRUE,
+/*opcode 0x9c*/ PTI_MODRM_TRUE,
+/*opcode 0x9d*/ PTI_MODRM_TRUE,
+/*opcode 0x9e*/ PTI_MODRM_TRUE,
+/*opcode 0x9f*/ PTI_MODRM_TRUE,
+/*opcode 0xa0*/ PTI_MODRM_FALSE,
+/*opcode 0xa1*/ PTI_MODRM_FALSE,
+/*opcode 0xa2*/ PTI_MODRM_FALSE,
+/*opcode 0xa3*/ PTI_MODRM_TRUE,
+/*opcode 0xa4*/ PTI_MODRM_TRUE,
+/*opcode 0xa5*/ PTI_MODRM_TRUE,
+/*opcode 0xa6*/ PTI_MODRM_UNDEF,
+/*opcode 0xa7*/ PTI_MODRM_UNDEF,
+/*opcode 0xa8*/ PTI_MODRM_FALSE,
+/*opcode 0xa9*/ PTI_MODRM_FALSE,
+/*opcode 0xaa*/ PTI_MODRM_FALSE,
+/*opcode 0xab*/ PTI_MODRM_TRUE,
+/*opcode 0xac*/ PTI_MODRM_TRUE,
+/*opcode 0xad*/ PTI_MODRM_TRUE,
+/*opcode 0xae*/ PTI_MODRM_TRUE,
+/*opcode 0xaf*/ PTI_MODRM_TRUE,
+/*opcode 0xb0*/ PTI_MODRM_TRUE,
+/*opcode 0xb1*/ PTI_MODRM_TRUE,
+/*opcode 0xb2*/ PTI_MODRM_TRUE,
+/*opcode 0xb3*/ PTI_MODRM_TRUE,
+/*opcode 0xb4*/ PTI_MODRM_TRUE,
+/*opcode 0xb5*/ PTI_MODRM_TRUE,
+/*opcode 0xb6*/ PTI_MODRM_TRUE,
+/*opcode 0xb7*/ PTI_MODRM_TRUE,
+/*opcode 0xb8*/ PTI_MODRM_TRUE,
+/*opcode 0xb9*/ PTI_MODRM_UNDEF,
+/*opcode 0xba*/ PTI_MODRM_TRUE,
+/*opcode 0xbb*/ PTI_MODRM_TRUE,
+/*opcode 0xbc*/ PTI_MODRM_TRUE,
+/*opcode 0xbd*/ PTI_MODRM_TRUE,
+/*opcode 0xbe*/ PTI_MODRM_TRUE,
+/*opcode 0xbf*/ PTI_MODRM_TRUE,
+/*opcode 0xc0*/ PTI_MODRM_TRUE,
+/*opcode 0xc1*/ PTI_MODRM_TRUE,
+/*opcode 0xc2*/ PTI_MODRM_TRUE,
+/*opcode 0xc3*/ PTI_MODRM_TRUE,
+/*opcode 0xc4*/ PTI_MODRM_TRUE,
+/*opcode 0xc5*/ PTI_MODRM_TRUE,
+/*opcode 0xc6*/ PTI_MODRM_TRUE,
+/*opcode 0xc7*/ PTI_MODRM_TRUE,
+/*opcode 0xc8*/ PTI_MODRM_FALSE,
+/*opcode 0xc9*/ PTI_MODRM_FALSE,
+/*opcode 0xca*/ PTI_MODRM_FALSE,
+/*opcode 0xcb*/ PTI_MODRM_FALSE,
+/*opcode 0xcc*/ PTI_MODRM_FALSE,
+/*opcode 0xcd*/ PTI_MODRM_FALSE,
+/*opcode 0xce*/ PTI_MODRM_FALSE,
+/*opcode 0xcf*/ PTI_MODRM_FALSE,
+/*opcode 0xd0*/ PTI_MODRM_TRUE,
+/*opcode 0xd1*/ PTI_MODRM_TRUE,
+/*opcode 0xd2*/ PTI_MODRM_TRUE,
+/*opcode 0xd3*/ PTI_MODRM_TRUE,
+/*opcode 0xd4*/ PTI_MODRM_TRUE,
+/*opcode 0xd5*/ PTI_MODRM_TRUE,
+/*opcode 0xd6*/ PTI_MODRM_TRUE,
+/*opcode 0xd7*/ PTI_MODRM_TRUE,
+/*opcode 0xd8*/ PTI_MODRM_TRUE,
+/*opcode 0xd9*/ PTI_MODRM_TRUE,
+/*opcode 0xda*/ PTI_MODRM_TRUE,
+/*opcode 0xdb*/ PTI_MODRM_TRUE,
+/*opcode 0xdc*/ PTI_MODRM_TRUE,
+/*opcode 0xdd*/ PTI_MODRM_TRUE,
+/*opcode 0xde*/ PTI_MODRM_TRUE,
+/*opcode 0xdf*/ PTI_MODRM_TRUE,
+/*opcode 0xe0*/ PTI_MODRM_TRUE,
+/*opcode 0xe1*/ PTI_MODRM_TRUE,
+/*opcode 0xe2*/ PTI_MODRM_TRUE,
+/*opcode 0xe3*/ PTI_MODRM_TRUE,
+/*opcode 0xe4*/ PTI_MODRM_TRUE,
+/*opcode 0xe5*/ PTI_MODRM_TRUE,
+/*opcode 0xe6*/ PTI_MODRM_TRUE,
+/*opcode 0xe7*/ PTI_MODRM_TRUE,
+/*opcode 0xe8*/ PTI_MODRM_TRUE,
+/*opcode 0xe9*/ PTI_MODRM_TRUE,
+/*opcode 0xea*/ PTI_MODRM_TRUE,
+/*opcode 0xeb*/ PTI_MODRM_TRUE,
+/*opcode 0xec*/ PTI_MODRM_TRUE,
+/*opcode 0xed*/ PTI_MODRM_TRUE,
+/*opcode 0xee*/ PTI_MODRM_TRUE,
+/*opcode 0xef*/ PTI_MODRM_TRUE,
+/*opcode 0xf0*/ PTI_MODRM_TRUE,
+/*opcode 0xf1*/ PTI_MODRM_TRUE,
+/*opcode 0xf2*/ PTI_MODRM_TRUE,
+/*opcode 0xf3*/ PTI_MODRM_TRUE,
+/*opcode 0xf4*/ PTI_MODRM_TRUE,
+/*opcode 0xf5*/ PTI_MODRM_TRUE,
+/*opcode 0xf6*/ PTI_MODRM_TRUE,
+/*opcode 0xf7*/ PTI_MODRM_TRUE,
+/*opcode 0xf8*/ PTI_MODRM_TRUE,
+/*opcode 0xf9*/ PTI_MODRM_TRUE,
+/*opcode 0xfa*/ PTI_MODRM_TRUE,
+/*opcode 0xfb*/ PTI_MODRM_TRUE,
+/*opcode 0xfc*/ PTI_MODRM_TRUE,
+/*opcode 0xfd*/ PTI_MODRM_TRUE,
+/*opcode 0xfe*/ PTI_MODRM_TRUE,
+/*opcode 0xff*/ PTI_MODRM_UNDEF,
+};
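
Illustration (not part of the imported sources): the two tables above map an
opcode byte to one of the PTI_MODRM_* constants from pti-modrm-defs.h, the
first for the one-byte opcode map and the second for the 0x0F two-byte escape
map. They are consumed by the instruction length decoder (pt_ild); the helper
below is a hypothetical sketch of how the tables are meant to be indexed, not
libipt code.

	/* Hypothetical helper, for illustration only. */
	static int needs_modrm(uint8_t opcode, int is_0f_map)
	{
		uint8_t kind;

		kind = is_0f_map ? has_modrm_map_0x0F[opcode]
				 : has_modrm_map_0x0[opcode];

		switch (kind) {
		case PTI_MODRM_TRUE:
		case PTI_MODRM_IGNORE_MOD:
			/* A ModRM byte follows; for IGNORE_MOD the mod bits
			 * do not influence the instruction's length.
			 */
			return 1;

		case PTI_MODRM_FALSE:
			return 0;

		case PTI_MODRM_UNDEF:
		default:
			/* Prefixes and undefined opcodes; handled elsewhere. */
			return 0;
		}
	}
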
diff --git a/libipt/internal/include/windows/pt_section_windows.h b/libipt/internal/include/windows/pt_section_windows.h
new file mode 100644
index 000000000000..96b101532a9b
--- /dev/null
+++ b/libipt/internal/include/windows/pt_section_windows.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_WINDOWS_H
+#define PT_SECTION_WINDOWS_H
+
+#include <windows.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+struct pt_section;
+
+
+/* Fstat-based file status. */
+struct pt_sec_windows_status {
+ /* The file status. */
+ struct _stat stat;
+};
+
+/* FileView-based section mapping information. */
+struct pt_sec_windows_mapping {
+ /* The file descriptor. */
+ int fd;
+
+ /* The FileMapping handle. */
+ HANDLE mh;
+
+ /* The mmap base address. */
+ uint8_t *base;
+
+ /* The begin and end of the mapped memory. */
+ const uint8_t *begin, *end;
+};
+
+
+/* Map a section.
+ *
+ * The caller has already opened the file for reading.
+ *
+ * On success, sets @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_invalid if @section can't be mapped.
+ */
+extern int pt_sec_windows_map(struct pt_section *section, int fd);
+
+/* Unmap a section.
+ *
+ * On success, clears @section's mapping, unmap, and read pointers.
+ *
+ * This function should not be called directly; call @section->unmap() instead.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_windows_unmap(struct pt_section *section);
+
+/* Read memory from a mapped section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer.
+ *
+ * This function should not be called directly; call @section->read() instead.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_invalid if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_sec_windows_read(const struct pt_section *section,
+ uint8_t *buffer, uint16_t size,
+ uint64_t offset);
+
+/* Compute the memory size of a section.
+ *
+ * On success, provides the amount of memory used for mapping @section in bytes
+ * in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @size is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_windows_memsize(const struct pt_section *section,
+ uint64_t *size);
+
+#endif /* PT_SECTION_WINDOWS_H */
diff --git a/libipt/src/posix/init.c b/libipt/src/posix/init.c
new file mode 100644
index 000000000000..dc20a432b02a
--- /dev/null
+++ b/libipt/src/posix/init.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_ild.h"
+
+
+static void __attribute__((constructor)) init(void)
+{
+ /* Initialize the Intel(R) Processor Trace instruction decoder. */
+ pt_ild_init();
+}
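
Illustration (not part of the imported sources): the constructor attribute
above makes pt_ild_init() run automatically when the library is loaded on
POSIX systems. On a toolchain without constructor support, a caller would
have to perform the same one-time initialization explicitly, roughly:

	#include "pt_ild.h"

	int main(void)
	{
		/* One-time initialization of the instruction decoder before
		 * any instruction-flow or block decoding takes place.
		 */
		pt_ild_init();

		/* ... allocate and use decoders ... */

		return 0;
	}
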
diff --git a/libipt/src/posix/pt_cpuid.c b/libipt/src/posix/pt_cpuid.c
new file mode 100644
index 000000000000..0ca755dd3fe9
--- /dev/null
+++ b/libipt/src/posix/pt_cpuid.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpuid.h"
+
+#include <cpuid.h>
+
+extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ __get_cpuid(leaf, eax, ebx, ecx, edx);
+}
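
Illustration (not part of the imported sources): pt_cpuid() is a thin wrapper
around the compiler's __get_cpuid(). A consumer such as the pt_cpu code
declared in pt_cpu.h can use it to identify the processor from CPUID leaf 0x1.
The helper below is a hypothetical sketch based on the architectural layout of
leaf 0x1 EAX (stepping, model, family plus the extended fields), not libipt
code.

	#include "pt_cpuid.h"

	#include <stdint.h>

	static void read_family_model_stepping(uint16_t *family, uint8_t *model,
					       uint8_t *stepping)
	{
		uint32_t eax, ebx, ecx, edx;

		pt_cpuid(0x1u, &eax, &ebx, &ecx, &edx);

		*stepping = (uint8_t) (eax & 0xf);
		*model = (uint8_t) ((eax >> 4) & 0xf);
		*family = (uint16_t) ((eax >> 8) & 0xf);

		/* The extended family field only applies to family 0xf. */
		if (*family == 0xf)
			*family += (uint16_t) ((eax >> 20) & 0xff);

		/* The extended model field applies to families 0x6 and 0xf. */
		if (*family == 0x6 || *family >= 0xf)
			*model = (uint8_t) (*model | (((eax >> 16) & 0xf) << 4));
	}
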
diff --git a/libipt/src/posix/pt_section_posix.c b/libipt/src/posix/pt_section_posix.c
new file mode 100644
index 000000000000..392ce4ecc6bb
--- /dev/null
+++ b/libipt/src/posix/pt_section_posix.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_posix.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+
+int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename)
+{
+ struct pt_sec_posix_status *status;
+ struct stat buffer;
+ int errcode;
+
+ if (!pstatus || !psize)
+ return -pte_internal;
+
+ errcode = stat(filename, &buffer);
+ if (errcode < 0)
+ return errcode;
+
+ if (buffer.st_size < 0)
+ return -pte_bad_image;
+
+ status = malloc(sizeof(*status));
+ if (!status)
+ return -pte_nomem;
+
+ status->stat = buffer;
+
+ *pstatus = status;
+ *psize = buffer.st_size;
+
+ return 0;
+}
+
+static int check_file_status(struct pt_section *section, int fd)
+{
+ struct pt_sec_posix_status *status;
+ struct stat stat;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = fstat(fd, &stat);
+ if (errcode)
+ return -pte_bad_image;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ if (stat.st_size != status->stat.st_size)
+ return -pte_bad_image;
+
+ if (stat.st_mtime != status->stat.st_mtime)
+ return -pte_bad_image;
+
+ return 0;
+}
+
+int pt_sec_posix_map(struct pt_section *section, int fd)
+{
+ struct pt_sec_posix_mapping *mapping;
+ uint64_t offset, size, adjustment;
+ uint8_t *base;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ offset = section->offset;
+ size = section->size;
+
+ adjustment = offset % sysconf(_SC_PAGESIZE);
+
+ offset -= adjustment;
+ size += adjustment;
+
+ /* The section is supposed to fit into the file so we shouldn't
+	 * see any overflows here.
+ */
+ if (size < section->size)
+ return -pte_internal;
+
+ if (SIZE_MAX < size)
+ return -pte_nomem;
+
+ if (INT_MAX < offset)
+ return -pte_nomem;
+
+ base = mmap(NULL, (size_t) size, PROT_READ, MAP_SHARED, fd,
+ (off_t) offset);
+ if (base == MAP_FAILED)
+ return -pte_nomem;
+
+ mapping = malloc(sizeof(*mapping));
+ if (!mapping) {
+ errcode = -pte_nomem;
+ goto out_map;
+ }
+
+ mapping->base = base;
+ mapping->size = size;
+ mapping->begin = base + adjustment;
+ mapping->end = base + size;
+
+ section->mapping = mapping;
+ section->unmap = pt_sec_posix_unmap;
+ section->read = pt_sec_posix_read;
+ section->memsize = pt_sec_posix_memsize;
+
+ return 0;
+
+out_map:
+ munmap(base, (size_t) size);
+ return errcode;
+}
+
+static int pt_sec_posix_map_success(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_on_map(section);
+ if (status < 0) {
+ /* We had to release the section lock for pt_section_on_map() so
+ * @section may have meanwhile been mapped by other threads.
+ *
+ * We still want to return the error so we release our mapping.
+ * Our caller does not yet know whether pt_section_map()
+ * succeeded.
+ */
+ (void) pt_section_unmap(section);
+ return status;
+ }
+
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ const char *filename;
+ FILE *file;
+ int fd, errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->mcount)
+ return pt_sec_posix_map_success(section);
+
+ if (section->mapping)
+ goto out_unlock;
+
+ filename = section->filename;
+ if (!filename)
+ goto out_unlock;
+
+ errcode = -pte_bad_image;
+ fd = open(filename, O_RDONLY);
+ if (fd == -1)
+ goto out_unlock;
+
+ errcode = check_file_status(section, fd);
+ if (errcode < 0)
+ goto out_fd;
+
+ /* We close the file on success. This does not unmap the section. */
+ errcode = pt_sec_posix_map(section, fd);
+ if (!errcode) {
+ close(fd);
+
+ return pt_sec_posix_map_success(section);
+ }
+
+	/* Fall back to file-based sections - report the original error
+ * if we fail to convert the file descriptor.
+ */
+ file = fdopen(fd, "rb");
+ if (!file)
+ goto out_fd;
+
+ /* We need to keep the file open on success. It will be closed when
+ * the section is unmapped.
+ */
+ errcode = pt_sec_file_map(section, file);
+ if (!errcode)
+ return pt_sec_posix_map_success(section);
+
+ fclose(file);
+ goto out_unlock;
+
+out_fd:
+ close(fd);
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
+
+int pt_sec_posix_unmap(struct pt_section *section)
+{
+ struct pt_sec_posix_mapping *mapping;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping || !section->unmap || !section->read || !section->memsize)
+ return -pte_internal;
+
+ section->mapping = NULL;
+ section->unmap = NULL;
+ section->read = NULL;
+ section->memsize = NULL;
+
+ munmap(mapping->base, (size_t) mapping->size);
+ free(mapping);
+
+ return 0;
+}
+
+int pt_sec_posix_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct pt_sec_posix_mapping *mapping;
+ const uint8_t *begin;
+
+ if (!buffer || !section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ /* We already checked in pt_section_read() that the requested memory
+ * lies within the section's boundaries.
+ *
+ * And we checked that the entire section was mapped. There's no need
+ * to check for overflows, again.
+ */
+ begin = mapping->begin + offset;
+
+ memcpy(buffer, begin, size);
+ return (int) size;
+}
+
+int pt_sec_posix_memsize(const struct pt_section *section, uint64_t *size)
+{
+ struct pt_sec_posix_mapping *mapping;
+ const uint8_t *begin, *end;
+
+ if (!section || !size)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ begin = mapping->base;
+ end = mapping->end;
+
+ if (!begin || !end || end < begin)
+ return -pte_internal;
+
+ *size = (uint64_t) (end - begin);
+
+ return 0;
+}
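
Illustration (not part of the imported sources): the alignment logic in
pt_sec_posix_map() is easiest to see with concrete numbers. Assuming a 4 KiB
page size, a section at file offset 0x1234 is mapped from the page-aligned
offset 0x1000, the mapping is grown by the 0x234 bytes of padding, and reads
start at base + adjustment so they still see the original section offset:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint64_t page = 0x1000;	/* sysconf(_SC_PAGESIZE) */
		uint64_t offset = 0x1234;	/* section->offset */
		uint64_t size = 0x100;		/* section->size */
		uint64_t adjustment;

		adjustment = offset % page;	/* 0x234 */
		offset -= adjustment;		/* 0x1000, page-aligned for mmap() */
		size += adjustment;		/* 0x334, covers the padding */

		assert(adjustment == 0x234);
		assert(offset == 0x1000);
		assert(size == 0x334);

		/* mapping->begin = base + adjustment, mapping->end = base + size. */
		return 0;
	}
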
diff --git a/libipt/src/pt_asid.c b/libipt/src/pt_asid.c
new file mode 100644
index 000000000000..f492e0f7fd67
--- /dev/null
+++ b/libipt/src/pt_asid.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_asid.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+int pt_asid_from_user(struct pt_asid *asid, const struct pt_asid *user)
+{
+ if (!asid)
+ return -pte_internal;
+
+ pt_asid_init(asid);
+
+ if (user) {
+ size_t size;
+
+ size = user->size;
+
+ /* Ignore fields in the user's asid we don't know. */
+ if (sizeof(*asid) < size)
+ size = sizeof(*asid);
+
+ /* Copy (portions of) the user's asid. */
+ memcpy(asid, user, size);
+
+		/* We copied the user's size - fix it. */
+ asid->size = sizeof(*asid);
+ }
+
+ return 0;
+}
+
+int pt_asid_to_user(struct pt_asid *user, const struct pt_asid *asid,
+ size_t size)
+{
+ if (!user || !asid)
+ return -pte_internal;
+
+ /* We need at least space for the size field. */
+ if (size < sizeof(asid->size))
+ return -pte_invalid;
+
+ /* Only provide the fields we actually have. */
+ if (sizeof(*asid) < size)
+ size = sizeof(*asid);
+
+ /* Copy (portions of) our asid to the user's. */
+ memcpy(user, asid, size);
+
+ /* We copied our size - fix it. */
+ user->size = size;
+
+ return 0;
+}
+
+int pt_asid_match(const struct pt_asid *lhs, const struct pt_asid *rhs)
+{
+ uint64_t lcr3, rcr3, lvmcs, rvmcs;
+
+ if (!lhs || !rhs)
+ return -pte_internal;
+
+ lcr3 = lhs->cr3;
+ rcr3 = rhs->cr3;
+
+ if (lcr3 != rcr3 && lcr3 != pt_asid_no_cr3 && rcr3 != pt_asid_no_cr3)
+ return 0;
+
+ lvmcs = lhs->vmcs;
+ rvmcs = rhs->vmcs;
+
+ if (lvmcs != rvmcs && lvmcs != pt_asid_no_vmcs &&
+ rvmcs != pt_asid_no_vmcs)
+ return 0;
+
+ return 1;
+}
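
Illustration (not part of the imported sources): pt_asid_match() treats an
unknown cr3 or vmcs as a wildcard, so two address spaces only mismatch if both
sides know a value and the values differ. A minimal sketch, assuming struct
pt_asid from intel-pt.h and pt_asid_init() from pt_asid.h, which leaves cr3
and vmcs at their "unknown" values:

	#include "pt_asid.h"

	#include "intel-pt.h"

	static int example_match(void)
	{
		struct pt_asid lhs, rhs;

		pt_asid_init(&lhs);
		pt_asid_init(&rhs);

		lhs.cr3 = 0x1000ull;

		/* rhs.cr3 is still pt_asid_no_cr3, so this returns 1. */
		return pt_asid_match(&lhs, &rhs);
	}
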
diff --git a/libipt/src/pt_block_cache.c b/libipt/src/pt_block_cache.c
new file mode 100644
index 000000000000..abe6ea1f3ca2
--- /dev/null
+++ b/libipt/src/pt_block_cache.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_block_cache.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+struct pt_block_cache *pt_bcache_alloc(uint64_t nentries)
+{
+ struct pt_block_cache *bcache;
+ uint64_t size;
+
+ if (!nentries || (UINT32_MAX < nentries))
+ return NULL;
+
+ size = sizeof(*bcache) + (nentries * sizeof(struct pt_bcache_entry));
+ if (SIZE_MAX < size)
+ return NULL;
+
+ bcache = malloc((size_t) size);
+ if (!bcache)
+ return NULL;
+
+ memset(bcache, 0, (size_t) size);
+ bcache->nentries = (uint32_t) nentries;
+
+ return bcache;
+}
+
+void pt_bcache_free(struct pt_block_cache *bcache)
+{
+ free(bcache);
+}
+
+int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index,
+ struct pt_bcache_entry bce)
+{
+ if (!bcache)
+ return -pte_internal;
+
+ if (bcache->nentries <= index)
+ return -pte_internal;
+
+ /* We rely on guaranteed atomic operations as specified in section 8.1.1
+ * in Volume 3A of the Intel(R) Software Developer's Manual at
+ * http://www.intel.com/sdm.
+ */
+ bcache->entry[(uint32_t) index] = bce;
+
+ return 0;
+}
+
+int pt_bcache_lookup(struct pt_bcache_entry *bce,
+ const struct pt_block_cache *bcache, uint64_t index)
+{
+ if (!bce || !bcache)
+ return -pte_internal;
+
+ if (bcache->nentries <= index)
+ return -pte_internal;
+
+ /* We rely on guaranteed atomic operations as specified in section 8.1.1
+ * in Volume 3A of the Intel(R) Software Developer's Manual at
+ * http://www.intel.com/sdm.
+ */
+ *bce = bcache->entry[(uint32_t) index];
+
+ return 0;
+}
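
Illustration (not part of the imported sources): a typical round trip through
the block cache combines the three calls above. The sketch assumes struct
pt_bcache_entry and the pte_* error codes from pt_block_cache.h and
intel-pt.h.

	#include "pt_block_cache.h"

	#include "intel-pt.h"

	#include <string.h>

	static int example_bcache(void)
	{
		struct pt_block_cache *bcache;
		struct pt_bcache_entry bce;
		int errcode;

		/* E.g. one entry per byte of a decoded section. */
		bcache = pt_bcache_alloc(0x1000ull);
		if (!bcache)
			return -pte_nomem;

		memset(&bce, 0, sizeof(bce));

		errcode = pt_bcache_add(bcache, 0x10ull, bce);
		if (errcode < 0)
			goto out;

		errcode = pt_bcache_lookup(&bce, bcache, 0x10ull);

	out:
		pt_bcache_free(bcache);
		return errcode;
	}
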
diff --git a/libipt/src/pt_block_decoder.c b/libipt/src/pt_block_decoder.c
new file mode 100644
index 000000000000..d6c816db65cc
--- /dev/null
+++ b/libipt/src/pt_block_decoder.c
@@ -0,0 +1,3469 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_block_decoder.h"
+#include "pt_block_cache.h"
+#include "pt_section.h"
+#include "pt_image.h"
+#include "pt_insn.h"
+#include "pt_config.h"
+#include "pt_asid.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+static int pt_blk_proceed_trailing_event(struct pt_block_decoder *,
+ struct pt_block *);
+
+
+static int pt_blk_status(const struct pt_block_decoder *decoder, int flags)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = decoder->status;
+
+ /* Indicate whether tracing is disabled or enabled.
+ *
+ * This duplicates the indication in struct pt_insn and covers the case
+ * where we indicate the status after synchronizing.
+ */
+ if (!decoder->enabled)
+ flags |= pts_ip_suppressed;
+
+ /* Forward end-of-trace indications.
+ *
+ * Postpone it as long as we're still processing events, though.
+ */
+ if ((status & pts_eos) && !decoder->process_event)
+ flags |= pts_eos;
+
+ return flags;
+}
+
+static void pt_blk_reset(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ decoder->mode = ptem_unknown;
+ decoder->ip = 0ull;
+ decoder->status = 0;
+ decoder->enabled = 0;
+ decoder->process_event = 0;
+ decoder->speculative = 0;
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ memset(&decoder->event, 0, sizeof(decoder->event));
+ pt_retstack_init(&decoder->retstack);
+ pt_asid_init(&decoder->asid);
+}
+
+/* Initialize the query decoder flags based on our flags. */
+
+static int pt_blk_init_qry_flags(struct pt_conf_flags *qflags,
+ const struct pt_conf_flags *flags)
+{
+ if (!qflags || !flags)
+ return -pte_internal;
+
+ memset(qflags, 0, sizeof(*qflags));
+
+ return 0;
+}
+
+int pt_blk_decoder_init(struct pt_block_decoder *decoder,
+ const struct pt_config *uconfig)
+{
+ struct pt_config config;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_config_from_user(&config, uconfig);
+ if (errcode < 0)
+ return errcode;
+
+ /* The user supplied decoder flags. */
+ decoder->flags = config.flags;
+
+ /* Set the flags we need for the query decoder we use. */
+ errcode = pt_blk_init_qry_flags(&config.flags, &decoder->flags);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_decoder_init(&decoder->query, &config);
+ if (errcode < 0)
+ return errcode;
+
+ pt_image_init(&decoder->default_image, NULL);
+ decoder->image = &decoder->default_image;
+
+ errcode = pt_msec_cache_init(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ pt_blk_reset(decoder);
+
+ return 0;
+}
+
+void pt_blk_decoder_fini(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_msec_cache_fini(&decoder->scache);
+ pt_image_fini(&decoder->default_image);
+ pt_qry_decoder_fini(&decoder->query);
+}
+
+struct pt_block_decoder *
+pt_blk_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_block_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_blk_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_blk_free_decoder(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_blk_decoder_fini(decoder);
+ free(decoder);
+}
+
+/* Maybe synthesize a tick event.
+ *
+ * If we're not already processing events, check the current time against the
+ * last event's time. If it changed, synthesize a tick event with the new time.
+ *
+ * Returns zero if no tick event has been created.
+ * Returns a positive integer if a tick event has been created.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_tick(struct pt_block_decoder *decoder, uint64_t ip)
+{
+ struct pt_event *ev;
+ uint64_t tsc;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* We're not generating tick events if tracing is disabled. */
+ if (!decoder->enabled)
+ return -pte_internal;
+
+ /* Events already provide a timestamp so there is no need to synthesize
+ * an artificial tick event. There's no room, either, since this would
+ * overwrite the in-progress event.
+ *
+ * In rare cases where we need to proceed to an event location using
+ * trace this may cause us to miss a timing update if the event is not
+ * forwarded to the user.
+ *
+ * The only case I can come up with at the moment is a MODE.EXEC binding
+ * to the TIP IP of a far branch.
+ */
+ if (decoder->process_event)
+ return 0;
+
+ errcode = pt_qry_time(&decoder->query, &tsc, &lost_mtc, &lost_cyc);
+ if (errcode < 0) {
+ /* If we don't have wall-clock time, we use relative time. */
+ if (errcode != -pte_no_time)
+ return errcode;
+ }
+
+ ev = &decoder->event;
+
+ /* We're done if time has not changed since the last event. */
+ if (tsc == ev->tsc)
+ return 0;
+
+ /* Time has changed so we create a new tick event. */
+ memset(ev, 0, sizeof(*ev));
+ ev->type = ptev_tick;
+ ev->variant.tick.ip = ip;
+
+ /* Indicate if we have wall-clock time or only relative time. */
+ if (errcode != -pte_no_time)
+ ev->has_tsc = 1;
+ ev->tsc = tsc;
+ ev->lost_mtc = lost_mtc;
+ ev->lost_cyc = lost_cyc;
+
+ /* We now have an event to process. */
+ decoder->process_event = 1;
+
+ return 1;
+}
+
+/* Query an indirect branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_indirect_branch(struct pt_block_decoder *decoder,
+ uint64_t *ip)
+{
+ uint64_t evip;
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ evip = decoder->ip;
+
+ status = pt_qry_indirect_branch(&decoder->query, ip);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.block.enable_tick_events) {
+ errcode = pt_blk_tick(decoder, evip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+/* Query a conditional branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_cond_branch(struct pt_block_decoder *decoder, int *taken)
+{
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_cond_branch(&decoder->query, taken);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.block.enable_tick_events) {
+ errcode = pt_blk_tick(decoder, decoder->ip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+static int pt_blk_start(struct pt_block_decoder *decoder, int status)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+ if (!(status & pts_ip_suppressed))
+ decoder->enabled = 1;
+
+ /* We will always have an event.
+ *
+ * If we synchronized onto an empty PSB+, tracing is disabled and we'll
+ * process events until the enabled event.
+ *
+ * If tracing is enabled, PSB+ must at least provide the execution mode,
+ * which we're going to forward to the user.
+ */
+ return pt_blk_proceed_trailing_event(decoder, NULL);
+}
+
+static int pt_blk_sync_reset(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ pt_blk_reset(decoder);
+
+ return 0;
+}
+
+int pt_blk_sync_forward(struct pt_block_decoder *decoder)
+{
+ int errcode, status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ errcode = pt_blk_sync_reset(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_qry_sync_forward(&decoder->query, &decoder->ip);
+
+ return pt_blk_start(decoder, status);
+}
+
+int pt_blk_sync_backward(struct pt_block_decoder *decoder)
+{
+ int errcode, status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ errcode = pt_blk_sync_reset(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_qry_sync_backward(&decoder->query, &decoder->ip);
+
+ return pt_blk_start(decoder, status);
+}
+
+int pt_blk_sync_set(struct pt_block_decoder *decoder, uint64_t offset)
+{
+ int errcode, status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ errcode = pt_blk_sync_reset(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset);
+
+ return pt_blk_start(decoder, status);
+}
+
+int pt_blk_get_offset(const struct pt_block_decoder *decoder, uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_offset(&decoder->query, offset);
+}
+
+int pt_blk_get_sync_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_sync_offset(&decoder->query, offset);
+}
+
+struct pt_image *pt_blk_get_image(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return decoder->image;
+}
+
+int pt_blk_set_image(struct pt_block_decoder *decoder, struct pt_image *image)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ if (!image)
+ image = &decoder->default_image;
+
+ decoder->image = image;
+ return 0;
+}
+
+const struct pt_config *
+pt_blk_get_config(const struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return pt_qry_get_config(&decoder->query);
+}
+
+int pt_blk_time(struct pt_block_decoder *decoder, uint64_t *time,
+ uint32_t *lost_mtc, uint32_t *lost_cyc)
+{
+ if (!decoder || !time)
+ return -pte_invalid;
+
+ return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc);
+}
+
+int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder, uint32_t *cbr)
+{
+ if (!decoder || !cbr)
+ return -pte_invalid;
+
+ return pt_qry_core_bus_ratio(&decoder->query, cbr);
+}
+
+int pt_blk_asid(const struct pt_block_decoder *decoder, struct pt_asid *asid,
+ size_t size)
+{
+ if (!decoder || !asid)
+ return -pte_invalid;
+
+ return pt_asid_to_user(asid, &decoder->asid, size);
+}
+
+/* Fetch the next pending event.
+ *
+ * Checks for pending events. If an event is pending, fetches it (if not
+ * already in process).
+ *
+ * Returns zero if no event is pending.
+ * Returns a positive integer if an event is pending or in process.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_blk_fetch_event(struct pt_block_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ if (decoder->process_event)
+ return 1;
+
+ if (!(decoder->status & pts_event_pending))
+ return 0;
+
+ status = pt_qry_event(&decoder->query, &decoder->event,
+ sizeof(decoder->event));
+ if (status < 0)
+ return status;
+
+ decoder->process_event = 1;
+ decoder->status = status;
+
+ return 1;
+}
+
+static inline int pt_blk_block_is_empty(const struct pt_block *block)
+{
+ if (!block)
+ return 1;
+
+ return !block->ninsn;
+}
+
+static inline int block_to_user(struct pt_block *ublock, size_t size,
+ const struct pt_block *block)
+{
+ if (!ublock || !block)
+ return -pte_internal;
+
+ if (ublock == block)
+ return 0;
+
+ /* Zero out any unknown bytes. */
+ if (sizeof(*block) < size) {
+ memset(ublock + sizeof(*block), 0, size - sizeof(*block));
+
+ size = sizeof(*block);
+ }
+
+ memcpy(ublock, block, size);
+
+ return 0;
+}
+
+static int pt_insn_false(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) insn;
+ (void) iext;
+
+ return 0;
+}
+
+/* Determine the next IP using trace.
+ *
+ * Tries to determine the IP of the next instruction using trace and provides it
+ * in @pip.
+ *
+ * Not requiring trace to determine the IP is treated as an internal error.
+ *
+ * Does not update the return compression stack for indirect calls. This is
+ * expected to have been done, already, when trying to determine the next IP
+ * without using trace.
+ *
+ * Does not update @decoder->status. The caller is expected to do that.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ * Returns -pte_internal if @pip, @decoder, @insn, or @iext are NULL.
+ * Returns -pte_internal if no trace is required.
+ */
+static int pt_blk_next_ip(uint64_t *pip, struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ int status, errcode;
+
+ if (!pip || !decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* We handle non-taken conditional branches, and compressed returns
+ * directly in the switch.
+ *
+ * All kinds of branches are handled below the switch.
+ */
+ switch (insn->iclass) {
+ case ptic_cond_jump: {
+ uint64_t ip;
+ int taken;
+
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0)
+ return status;
+
+ ip = insn->ip + insn->size;
+ if (taken)
+ ip += iext->variant.branch.displacement;
+
+ *pip = ip;
+ return status;
+ }
+
+ case ptic_return: {
+ int taken;
+
+ /* Check for a compressed return. */
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ break;
+ }
+
+ /* A compressed return is indicated by a taken conditional
+ * branch.
+ */
+ if (!taken)
+ return -pte_bad_retcomp;
+
+ errcode = pt_retstack_pop(&decoder->retstack, pip);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+ }
+
+ case ptic_jump:
+ case ptic_call:
+ /* A direct jump or call wouldn't require trace. */
+ if (iext->variant.branch.is_direct)
+ return -pte_internal;
+
+ break;
+
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ break;
+
+ case ptic_ptwrite:
+ case ptic_other:
+ return -pte_internal;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+
+ /* Process an indirect branch.
+ *
+ * This covers indirect jumps and calls, non-compressed returns, and all
+ * flavors of far transfers.
+ */
+ return pt_blk_indirect_branch(decoder, pip);
+}
+
+/* Proceed to the next IP using trace.
+ *
+ * We failed to proceed without trace. This ends the current block. Now use
+ * trace to do one final step to determine the start IP of the next block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_with_trace(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_blk_next_ip(&decoder->ip, decoder, insn, iext);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates upcoming
+ * events.
+ */
+ decoder->status = status;
+
+ /* We do need an IP in order to proceed. */
+ if (status & pts_ip_suppressed)
+ return -pte_noip;
+
+ return 0;
+}
+
+/* Decode one instruction in a known section.
+ *
+ * Decode the instruction at @insn->ip in @msec assuming execution mode
+ * @insn->mode.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_decode_in_section(struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ const struct pt_mapped_section *msec)
+{
+ int status;
+
+ if (!insn || !iext)
+ return -pte_internal;
+
+	/* We know that @insn->ip is contained in @msec.
+	 *
+	 * Note that we need to translate @insn->ip into a section offset.
+ */
+ status = pt_msec_read(msec, insn->raw, sizeof(insn->raw), insn->ip);
+ if (status < 0)
+ return status;
+
+ /* We initialize @insn->size to the maximal possible size. It will be
+ * set to the actual size during instruction decode.
+ */
+ insn->size = (uint8_t) status;
+
+ return pt_ild_decode(insn, iext);
+}
+
+/* Update the return-address stack if @insn is a near call.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static inline int pt_blk_log_call(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ if (insn->iclass != ptic_call)
+ return 0;
+
+ /* Ignore direct calls to the next instruction that are used for
+ * position independent code.
+ */
+ if (iext->variant.branch.is_direct &&
+ !iext->variant.branch.displacement)
+ return 0;
+
+ return pt_retstack_push(&decoder->retstack, insn->ip + insn->size);
+}
+
+/* Proceed by one instruction.
+ *
+ * Tries to decode the instruction at @decoder->ip and, on success, adds it to
+ * @block and provides it in @pinsn and @piext.
+ *
+ * The instruction will not be added if:
+ *
+ * - the memory could not be read: return error
+ * - it could not be decoded: return error
+ * - @block is already full: return zero
+ * - @block would switch sections: return zero
+ *
+ * Returns a positive integer if the instruction was added.
+ * Returns zero if the instruction didn't fit into @block.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_one_insn(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *pinsn,
+ struct pt_insn_ext *piext)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ uint16_t ninsn;
+ int status;
+
+ if (!decoder || !block || !pinsn || !piext)
+ return -pte_internal;
+
+ /* There's nothing to do if there is no room in @block. */
+ ninsn = block->ninsn + 1;
+ if (!ninsn)
+ return 0;
+
+ /* The truncated instruction must be last. */
+ if (block->truncated)
+ return 0;
+
+ memset(&insn, 0, sizeof(insn));
+ memset(&iext, 0, sizeof(iext));
+
+ insn.mode = decoder->mode;
+ insn.ip = decoder->ip;
+
+ status = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (status < 0)
+ return status;
+
+ /* We do not switch sections inside a block. */
+ if (insn.isid != block->isid) {
+ if (!pt_blk_block_is_empty(block))
+ return 0;
+
+ block->isid = insn.isid;
+ }
+
+ /* If we couldn't read @insn's memory in one chunk from @insn.isid, we
+ * provide the memory in @block.
+ */
+ if (insn.truncated) {
+ memcpy(block->raw, insn.raw, insn.size);
+ block->size = insn.size;
+ block->truncated = 1;
+ }
+
+ /* Log calls' return addresses for return compression. */
+ status = pt_blk_log_call(decoder, &insn, &iext);
+ if (status < 0)
+ return status;
+
+ /* We have a new instruction. */
+ block->iclass = insn.iclass;
+ block->end_ip = insn.ip;
+ block->ninsn = ninsn;
+
+ *pinsn = insn;
+ *piext = iext;
+
+ return 1;
+}
+
+/* Proceed to a particular type of instruction without using trace.
+ *
+ * Proceed until we reach an instruction for which @predicate returns a positive
+ * integer or until:
+ *
+ * - @predicate returns an error: return error
+ * - @block is full: return zero
+ * - @block would switch sections: return zero
+ * - we would need trace: return -pte_bad_query
+ *
+ * Provide the last instruction that was reached in @insn and @iext.
+ *
+ * Update @decoder->ip to point to the last IP that was reached. If we fail due
+ * to lack of trace or if we reach a desired instruction, this is @insn->ip;
+ * otherwise this is the next instruction's IP.
+ *
+ * Returns a positive integer if a suitable instruction was reached.
+ * Returns zero if no such instruction was reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_insn(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ int (*predicate)(const struct pt_insn *,
+ const struct pt_insn_ext *))
+{
+ int status;
+
+ if (!decoder || !insn || !predicate)
+ return -pte_internal;
+
+ for (;;) {
+ status = pt_blk_proceed_one_insn(decoder, block, insn, iext);
+ if (status <= 0)
+ return status;
+
+ /* We're done if this instruction matches the spec (positive
+ * status) or we run into an error (negative status).
+ */
+ status = predicate(insn, iext);
+ if (status != 0)
+ return status;
+
+ /* Let's see if we can proceed to the next IP without trace. */
+ status = pt_insn_next_ip(&decoder->ip, insn, iext);
+ if (status < 0)
+ return status;
+
+ /* End the block if the user asked us to.
+ *
+ * We only need to take care about direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn->iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn->iclass == ptic_jump)))
+ return 0;
+ }
+}
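+
+/* Illustrative sketch: the @predicate argument turns the function above into
+ * a generic run-until primitive. Passing pt_insn_false() never matches and
+ * proceeds as far as the block and the trace allow; passing a real predicate
+ * such as pt_insn_is_ptwrite() stops at the next instruction of that kind:
+ *
+ *	status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ *					pt_insn_is_ptwrite);
+ *	if (status > 0)
+ *		... @insn is the PTWRITE and @decoder->ip still points to it ...
+ */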
+
+/* Proceed to a particular IP without using trace.
+ *
+ * Proceed until we reach @ip or until:
+ *
+ * - @block is full: return zero
+ * - @block would switch sections: return zero
+ * - we would need trace: return -pte_bad_query
+ *
+ * Provide the last instruction that was reached in @insn and @iext. If we
+ * reached @ip, this is the instruction preceding it.
+ *
+ * Update @decoder->ip to point to the last IP that was reached. If we fail due
+ * to lack of trace, this is @insn->ip; otherwise this is the next instruction's
+ * IP.
+ *
+ * Returns a positive integer if @ip was reached.
+ * Returns zero if no such instruction was reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_ip(struct pt_block_decoder *decoder,
+ struct pt_block *block, struct pt_insn *insn,
+ struct pt_insn_ext *iext, uint64_t ip)
+{
+ int status;
+
+ if (!decoder || !insn)
+ return -pte_internal;
+
+ for (;;) {
+ /* We're done when we reach @ip. We may not even have to decode
+ * a single instruction in some cases.
+ */
+ if (decoder->ip == ip)
+ return 1;
+
+ status = pt_blk_proceed_one_insn(decoder, block, insn, iext);
+ if (status <= 0)
+ return status;
+
+ /* Let's see if we can proceed to the next IP without trace. */
+ status = pt_insn_next_ip(&decoder->ip, insn, iext);
+ if (status < 0)
+ return status;
+
+ /* End the block if the user asked us to.
+ *
+ * We only need to take care about direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ *
+ * The call at the end of the block may have reached @ip; make
+ * sure to indicate that.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn->iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn->iclass == ptic_jump))) {
+ return (decoder->ip == ip ? 1 : 0);
+ }
+ }
+}
+
+/* Proceed to a particular IP with trace, if necessary.
+ *
+ * Proceed until we reach @ip or until:
+ *
+ * - @block is full: return zero
+ * - @block would switch sections: return zero
+ * - we need trace: return zero
+ *
+ * Update @decoder->ip to point to the last IP that was reached.
+ *
+ * A return of zero ends @block.
+ *
+ * Returns a positive integer if @ip was reached.
+ * Returns zero if no such instruction was reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_ip_with_trace(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ uint64_t ip)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int status;
+
+ /* Try to reach @ip without trace.
+ *
+ * We're also OK if @block overflowed or we switched sections and we
+ * have to try again in the next iteration.
+ */
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext, ip);
+ if (status != -pte_bad_query)
+ return status;
+
+ /* Needing trace is not an error. We use trace to determine the next
+ * start IP and end the block.
+ */
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+}
+
+static int pt_insn_skl014(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!insn || !iext)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_call:
+ case ptic_jump:
+ return iext->variant.branch.is_direct;
+
+ case ptic_other:
+ return pt_insn_changes_cr3(insn, iext);
+ }
+}
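+
+/* Illustrative example (hypothetical addresses): with an IP filter covering
+ * [0x1000, 0x2000) and SKL014 present, a direct jump at 0x1ff0 whose target
+ * 0x3000 lies outside the filter region clears FilterEn and may produce a
+ * TIP.PGD with suppressed payload, just like a CR3-changing instruction
+ * would. The predicate above therefore also matches direct jumps and calls;
+ * pt_blk_proceed_skl014() below checks the branch target against the filter
+ * configuration to find the branch the event actually binds to.
+ */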
+
+/* Proceed to the location of a synchronous disabled event with suppressed IP
+ * considering SKL014.
+ *
+ * We have a (synchronous) disabled event pending. Proceed to the event
+ * location and indicate whether we were able to reach it.
+ *
+ * With SKL014 a TIP.PGD with suppressed IP may also be generated by a direct
+ * unconditional branch that clears FilterEn by jumping out of a filter region
+ * or into a TraceStop region. Use the filter configuration to determine the
+ * exact branch the event binds to.
+ *
+ * The last instruction that was reached is stored in @insn/@iext.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_skl014(struct pt_block_decoder *decoder,
+ struct pt_block *block, struct pt_insn *insn,
+ struct pt_insn_ext *iext)
+{
+ const struct pt_conf_addr_filter *addr_filter;
+ int status;
+
+ if (!decoder || !block || !insn || !iext)
+ return -pte_internal;
+
+ addr_filter = &decoder->query.config.addr_filter;
+ for (;;) {
+ uint64_t ip;
+
+ status = pt_blk_proceed_to_insn(decoder, block, insn, iext,
+ pt_insn_skl014);
+ if (status <= 0)
+ break;
+
+ /* The erratum doesn't apply if we can bind the event to a
+ * CR3-changing instruction.
+ */
+ if (pt_insn_changes_cr3(insn, iext))
+ break;
+
+ /* Check the filter against the branch target. */
+ status = pt_insn_next_ip(&ip, insn, iext);
+ if (status < 0)
+ break;
+
+ status = pt_filter_addr_check(addr_filter, ip);
+ if (status <= 0) {
+ /* We need to flip the indication.
+ *
+ * We reached the event location when @ip lies inside a
+ * tracing-disabled region.
+ */
+ if (!status)
+ status = 1;
+
+ break;
+ }
+
+ /* This is not the correct instruction. Proceed past it and try
+ * again.
+ */
+ decoder->ip = ip;
+
+ /* End the block if the user asked us to.
+ *
+ * We only need to take care about direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn->iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn->iclass == ptic_jump)))
+ break;
+ }
+
+ return status;
+}
+
+/* Proceed to the event location for a disabled event.
+ *
+ * We have a (synchronous) disabled event pending. Proceed to the event
+ * location and indicate whether we were able to reach it.
+ *
+ * The last instruction that was reached is stored in @insn/@iext.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_disabled(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ const struct pt_event *ev)
+{
+ if (!decoder || !block || !ev)
+ return -pte_internal;
+
+ if (ev->ip_suppressed) {
+		/* Due to SKL014 the TIP.PGD payload may also be suppressed
+		 * for direct branches.
+ *
+ * If we don't have a filter configuration we assume that no
+ * address filters were used and the erratum does not apply.
+ *
+ * We might otherwise disable tracing too early.
+ */
+ if (decoder->query.config.addr_filter.config.addr_cfg &&
+ decoder->query.config.errata.skl014)
+ return pt_blk_proceed_skl014(decoder, block, insn,
+ iext);
+
+ /* A synchronous disabled event also binds to far branches and
+ * CPL-changing instructions. Both would require trace,
+ * however, and are thus implicitly handled by erroring out.
+ *
+ * The would-require-trace error is handled by our caller.
+ */
+ return pt_blk_proceed_to_insn(decoder, block, insn, iext,
+ pt_insn_changes_cr3);
+ } else
+ return pt_blk_proceed_to_ip(decoder, block, insn, iext,
+ ev->variant.disabled.ip);
+}
+
+/* Set the expected resume address for a synchronous disable.
+ *
+ * On a synchronous disable, @decoder->ip still points to the instruction to
+ * which the event bound. That's not where we expect tracing to resume.
+ *
+ * For calls, a fair assumption is that tracing resumes after returning from the
+ * called function. For other types of instructions, we simply don't know.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_set_disable_resume_ip(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn)
+{
+ if (!decoder || !insn)
+ return -pte_internal;
+
+ switch (insn->iclass) {
+ case ptic_call:
+ case ptic_far_call:
+ decoder->ip = insn->ip + insn->size;
+ break;
+
+ default:
+ decoder->ip = 0ull;
+ break;
+ }
+
+ return 0;
+}
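+
+/* Example (hypothetical addresses): if tracing is disabled synchronously on a
+ * 5-byte call instruction at 0x1000, e.g. a call into a filtered-out library,
+ * we expect tracing to resume at the return address and set @decoder->ip to
+ * 0x1005. For any other instruction class we set it to zero since we simply
+ * don't know where tracing will resume.
+ */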
+
+/* Proceed to the event location for an async paging event.
+ *
+ * We have an async paging event pending. Proceed to the event location and
+ * indicate whether we were able to reach it. Needing trace in order to proceed
+ * is not an error in this case but ends the block.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_async_paging(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Apply the event immediately if we don't have an IP. */
+ if (ev->ip_suppressed)
+ return 1;
+
+ status = pt_blk_proceed_to_ip_with_trace(decoder, block,
+ ev->variant.async_paging.ip);
+ if (status < 0)
+ return status;
+
+ /* We may have reached the IP. */
+ return (decoder->ip == ev->variant.async_paging.ip ? 1 : 0);
+}
+
+/* Proceed to the event location for an async vmcs event.
+ *
+ * We have an async vmcs event pending. Proceed to the event location and
+ * indicate whether we were able to reach it. Needing trace in order to proceed
+ * is not an error in this case but ends the block.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_async_vmcs(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Apply the event immediately if we don't have an IP. */
+ if (ev->ip_suppressed)
+ return 1;
+
+ status = pt_blk_proceed_to_ip_with_trace(decoder, block,
+ ev->variant.async_vmcs.ip);
+ if (status < 0)
+ return status;
+
+ /* We may have reached the IP. */
+ return (decoder->ip == ev->variant.async_vmcs.ip ? 1 : 0);
+}
+
+/* Proceed to the event location for an exec mode event.
+ *
+ * We have an exec mode event pending. Proceed to the event location and
+ * indicate whether we were able to reach it. Needing trace in order to proceed
+ * is not an error in this case but ends the block.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_exec_mode(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Apply the event immediately if we don't have an IP. */
+ if (ev->ip_suppressed)
+ return 1;
+
+ status = pt_blk_proceed_to_ip_with_trace(decoder, block,
+ ev->variant.exec_mode.ip);
+ if (status < 0)
+ return status;
+
+ /* We may have reached the IP. */
+ return (decoder->ip == ev->variant.exec_mode.ip ? 1 : 0);
+}
+
+/* Proceed to the event location for a ptwrite event.
+ *
+ * We have a ptwrite event pending. Proceed to the event location and indicate
+ * whether we were able to reach it.
+ *
+ * In case of the event binding to a ptwrite instruction, we pass beyond that
+ * instruction and update the event to provide the instruction's IP.
+ *
+ * In the case of the event binding to an IP provided in the event, we move
+ * beyond the instruction at that IP.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_ptwrite(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ struct pt_event *ev)
+{
+ int status;
+
+ if (!insn || !ev)
+ return -pte_internal;
+
+ /* If we don't have an IP, the event binds to the next PTWRITE
+ * instruction.
+ *
+ * If we have an IP it still binds to the next PTWRITE instruction but
+ * now the IP tells us where that instruction is. This makes most sense
+ * when tracing is disabled and we don't have any other means of finding
+	 * the PTWRITE instruction. We nevertheless distinguish the two
+	 * cases here.
+ *
+ * In both cases, we move beyond the PTWRITE instruction, so it will be
+ * the last instruction in the current block and @decoder->ip will point
+ * to the instruction following it.
+ */
+ if (ev->ip_suppressed) {
+ status = pt_blk_proceed_to_insn(decoder, block, insn, iext,
+ pt_insn_is_ptwrite);
+ if (status <= 0)
+ return status;
+
+ /* We now know the IP of the PTWRITE instruction corresponding
+ * to this event. Fill it in to make it more convenient for the
+ * user to process the event.
+ */
+ ev->variant.ptwrite.ip = insn->ip;
+ ev->ip_suppressed = 0;
+ } else {
+ status = pt_blk_proceed_to_ip(decoder, block, insn, iext,
+ ev->variant.ptwrite.ip);
+ if (status <= 0)
+ return status;
+
+ /* We reached the PTWRITE instruction and @decoder->ip points to
+ * it; @insn/@iext still contain the preceding instruction.
+ *
+ * Proceed beyond the PTWRITE to account for it. Note that we
+ * may still overflow the block, which would cause us to
+ * postpone both instruction and event to the next block.
+ */
+ status = pt_blk_proceed_one_insn(decoder, block, insn, iext);
+ if (status <= 0)
+ return status;
+ }
+
+ return 1;
+}
+
+/* Try to work around erratum SKD022.
+ *
+ * If we get an asynchronous disable on VMLAUNCH or VMRESUME, the FUP that
+ * caused the disable to be asynchronous might have been bogus.
+ *
+ * Returns a positive integer if the erratum has been handled.
+ * Returns zero if the erratum does not apply.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_handle_erratum_skd022(struct pt_block_decoder *decoder,
+ struct pt_event *ev)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ insn.mode = decoder->mode;
+ insn.ip = ev->variant.async_disabled.at;
+
+ errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (errcode < 0)
+ return 0;
+
+ switch (iext.iclass) {
+ default:
+ /* The erratum does not apply. */
+ return 0;
+
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ /* The erratum may apply. We can't be sure without a lot more
+ * analysis. Let's assume it does.
+ *
+ * We turn the async disable into a sync disable. Our caller
+ * will restart event processing.
+ */
+ ev->type = ptev_disabled;
+ ev->variant.disabled.ip = ev->variant.async_disabled.ip;
+
+ return 1;
+ }
+}
+
+/* Postpone proceeding past @insn/@iext and indicate a pending event.
+ *
+ * There may be further events pending on @insn/@iext. Postpone proceeding past
+ * @insn/@iext until we processed all events that bind to it.
+ *
+ * Returns a non-negative pt_status_flag bit-vector indicating a pending event
+ * on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_postpone_insn(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* Only one can be active. */
+ if (decoder->process_insn)
+ return -pte_internal;
+
+ decoder->process_insn = 1;
+ decoder->insn = *insn;
+ decoder->iext = *iext;
+
+ return pt_blk_status(decoder, pts_event_pending);
+}
+
+/* Remove any postponed instruction from @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_clear_postponed_insn(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ return 0;
+}
+
+/* Proceed past a postponed instruction.
+ *
+ * If an instruction has been postponed in @decoder, proceed past it.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_proceed_postponed_insn(struct pt_block_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* There's nothing to do if we have no postponed instruction. */
+ if (!decoder->process_insn)
+ return 0;
+
+ /* There's nothing to do if tracing got disabled. */
+ if (!decoder->enabled)
+ return pt_blk_clear_postponed_insn(decoder);
+
+ status = pt_insn_next_ip(&decoder->ip, &decoder->insn, &decoder->iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ status = pt_blk_proceed_with_trace(decoder, &decoder->insn,
+ &decoder->iext);
+ if (status < 0)
+ return status;
+ }
+
+ return pt_blk_clear_postponed_insn(decoder);
+}
+
+/* Proceed to the next event.
+ *
+ * We have an event pending. Proceed to the event location and indicate the
+ * event to the user.
+ *
+ * On our way to the event location we may also be forced to postpone the event
+ * to the next block, e.g. if we overflow the number of instructions in the
+ * block or if we need trace in order to reach the event location.
+ *
+ * If we're not able to reach the event location, we return zero. This is what
+ * pt_blk_status() would return since:
+ *
+ * - we suppress pts_eos as long as we're processing events
+ * - we do not set pts_ip_suppressed since tracing must be enabled
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_blk_proceed_event(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder || !decoder->process_event || !block)
+ return -pte_internal;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_enabled:
+ break;
+
+ case ptev_disabled:
+ status = pt_blk_proceed_to_disabled(decoder, block, &insn,
+ &iext, ev);
+ if (status <= 0) {
+ /* A synchronous disable event also binds to the next
+ * indirect or conditional branch, i.e. to any branch
+ * that would have required trace.
+ */
+ if (status != -pte_bad_query)
+ return status;
+
+ status = pt_blk_set_disable_resume_ip(decoder, &insn);
+ if (status < 0)
+ return status;
+ }
+
+ break;
+
+ case ptev_async_disabled:
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.async_disabled.at);
+ if (status <= 0)
+ return status;
+
+ if (decoder->query.config.errata.skd022) {
+ status = pt_blk_handle_erratum_skd022(decoder, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ /* If the erratum hits, we modify the event.
+ * Try again.
+ */
+ return pt_blk_proceed_event(decoder, block);
+ }
+ }
+
+ break;
+
+ case ptev_async_branch:
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.async_branch.from);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_paging:
+ if (!decoder->enabled)
+ break;
+
+ status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ pt_insn_binds_to_pip);
+ if (status <= 0)
+ return status;
+
+ /* We bound a paging event. Make sure we do not bind further
+ * paging events to this instruction.
+ */
+ decoder->bound_paging = 1;
+
+ return pt_blk_postpone_insn(decoder, &insn, &iext);
+
+ case ptev_async_paging:
+ status = pt_blk_proceed_to_async_paging(decoder, block, ev);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_vmcs:
+ if (!decoder->enabled)
+ break;
+
+ status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ pt_insn_binds_to_vmcs);
+ if (status <= 0)
+ return status;
+
+ /* We bound a vmcs event. Make sure we do not bind further vmcs
+ * events to this instruction.
+ */
+ decoder->bound_vmcs = 1;
+
+ return pt_blk_postpone_insn(decoder, &insn, &iext);
+
+ case ptev_async_vmcs:
+ status = pt_blk_proceed_to_async_vmcs(decoder, block, ev);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_overflow:
+ break;
+
+ case ptev_exec_mode:
+ status = pt_blk_proceed_to_exec_mode(decoder, block, ev);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_tsx:
+ if (ev->ip_suppressed)
+ break;
+
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.tsx.ip);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_stop:
+ break;
+
+ case ptev_exstop:
+ if (!decoder->enabled || ev->ip_suppressed)
+ break;
+
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.exstop.ip);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_mwait:
+ if (!decoder->enabled || ev->ip_suppressed)
+ break;
+
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.mwait.ip);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ break;
+
+ case ptev_ptwrite:
+ if (!decoder->enabled)
+ break;
+
+ status = pt_blk_proceed_to_ptwrite(decoder, block, &insn,
+ &iext, ev);
+ if (status <= 0)
+ return status;
+
+ /* We bound a ptwrite event. Make sure we do not bind further
+ * ptwrite events to this instruction.
+ */
+ decoder->bound_ptwrite = 1;
+
+ return pt_blk_postpone_insn(decoder, &insn, &iext);
+
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ break;
+ }
+
+ return pt_blk_status(decoder, pts_event_pending);
+}
+
+/* Proceed to the next decision point without using the block cache.
+ *
+ * Tracing is enabled and we don't have an event pending. Proceed as far as
+ * we get without trace. Stop when we either:
+ *
+ * - need trace in order to continue
+ * - overflow the max number of instructions in a block
+ *
+ * We actually proceed one instruction further to get the start IP for the next
+ * block. This only updates @decoder's internal state, though.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_no_event_uncached(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int status;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ /* This is overly conservative, really. We shouldn't get a bad-query
+ * status unless we decoded at least one instruction successfully.
+ */
+ memset(&insn, 0, sizeof(insn));
+ memset(&iext, 0, sizeof(iext));
+
+ /* Proceed as far as we get without trace. */
+ status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ pt_insn_false);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ return 0;
+}
+
+/* Check if @ip is contained in @msec.
+ *
+ * Returns non-zero if it is.
+ * Returns zero if it isn't or if @msec is NULL.
+ */
+static inline int pt_blk_is_in_section(const struct pt_mapped_section *msec,
+ uint64_t ip)
+{
+ uint64_t begin, end;
+
+ begin = pt_msec_begin(msec);
+ end = pt_msec_end(msec);
+
+ return (begin <= ip && ip < end);
+}
+
+/* Insert a trampoline block cache entry.
+ *
+ * Add a trampoline block cache entry at @ip to continue at @nip, where @nip
+ * must be the next instruction after @ip.
+ *
+ * Both @ip and @nip must be section-relative.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static inline int pt_blk_add_trampoline(struct pt_block_cache *bcache,
+ uint64_t ip, uint64_t nip,
+ enum pt_exec_mode mode)
+{
+ struct pt_bcache_entry bce;
+ int64_t disp;
+
+ /* The displacement from @ip to @nip for the trampoline. */
+ disp = (int64_t) (nip - ip);
+
+ memset(&bce, 0, sizeof(bce));
+ bce.displacement = (int32_t) disp;
+ bce.ninsn = 1;
+ bce.mode = mode;
+ bce.qualifier = ptbq_again;
+
+ /* If we can't reach @nip without overflowing the displacement field, we
+ * have to stop and re-decode the instruction at @ip.
+ */
+ if ((int64_t) bce.displacement != disp) {
+
+ memset(&bce, 0, sizeof(bce));
+ bce.ninsn = 1;
+ bce.mode = mode;
+ bce.qualifier = ptbq_decode;
+ }
+
+ return pt_bcache_add(bcache, ip, bce);
+}
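+
+/* Worked example (hypothetical offsets): for @ip = 0x100 and @nip = 0x105 the
+ * displacement 0x5 fits into @bce.displacement and we add a ptbq_again entry
+ * that accounts for one instruction and continues at @nip. If @nip were so
+ * far away that the (int32_t) conversion above changed the value, the check
+ * would fire and we would fall back to a ptbq_decode entry that re-decodes
+ * the instruction at @ip each time instead.
+ */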
+
+/* Insert a decode block cache entry.
+ *
+ * Add a decode block cache entry at @ioff.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static inline int pt_blk_add_decode(struct pt_block_cache *bcache,
+ uint64_t ioff, enum pt_exec_mode mode)
+{
+ struct pt_bcache_entry bce;
+
+ memset(&bce, 0, sizeof(bce));
+ bce.ninsn = 1;
+ bce.mode = mode;
+ bce.qualifier = ptbq_decode;
+
+ return pt_bcache_add(bcache, ioff, bce);
+}
+
+enum {
+ /* The maximum number of steps when filling the block cache. */
+ bcache_fill_steps = 0x400
+};
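+
+/* Illustrative note (sketch of the intent): with bcache_fill_steps = 0x400, a
+ * fill that recurses through 0x400 instructions without reaching a decision
+ * point stops and inserts a ptbq_again trampoline to the instruction where it
+ * left off. The next time decode reaches that instruction, the cache is
+ * filled further from there, so long straight-line code is cached in bounded
+ * chunks rather than in one unbounded recursion.
+ */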
+
+/* Proceed to the next instruction and fill the block cache for @decoder->ip.
+ *
+ * Tracing is enabled and we don't have an event pending. The current IP is not
+ * yet cached.
+ *
+ * Proceed one instruction without using the block cache, then try to proceed
+ * further using the block cache.
+ *
+ * On our way back, add a block cache entry for the IP before proceeding. Note
+ * that the recursion is bounded by @steps and ultimately by the maximum number
+ * of instructions in a block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int
+pt_blk_proceed_no_event_fill_cache(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_block_cache *bcache,
+ const struct pt_mapped_section *msec,
+ size_t steps)
+{
+ struct pt_bcache_entry bce;
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ uint64_t nip, dip;
+ int64_t disp, ioff, noff;
+ int status;
+
+ if (!decoder || !steps)
+ return -pte_internal;
+
+ /* Proceed one instruction by decoding and examining it.
+ *
+ * Note that we also return on a status of zero that indicates that the
+ * instruction didn't fit into @block.
+ */
+ status = pt_blk_proceed_one_insn(decoder, block, &insn, &iext);
+ if (status <= 0)
+ return status;
+
+ ioff = pt_msec_unmap(msec, insn.ip);
+
+ /* Let's see if we can proceed to the next IP without trace.
+ *
+ * If we can't, this is certainly a decision point.
+ */
+ status = pt_insn_next_ip(&decoder->ip, &insn, &iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ memset(&bce, 0, sizeof(bce));
+ bce.ninsn = 1;
+ bce.mode = insn.mode;
+ bce.isize = insn.size;
+
+ /* Clear the instruction size in case of overflows. */
+ if ((uint8_t) bce.isize != insn.size)
+ bce.isize = 0;
+
+ switch (insn.iclass) {
+ case ptic_ptwrite:
+ case ptic_error:
+ case ptic_other:
+ return -pte_internal;
+
+ case ptic_jump:
+ /* A direct jump doesn't require trace. */
+ if (iext.variant.branch.is_direct)
+ return -pte_internal;
+
+ bce.qualifier = ptbq_indirect;
+ break;
+
+ case ptic_call:
+ /* A direct call doesn't require trace. */
+ if (iext.variant.branch.is_direct)
+ return -pte_internal;
+
+ bce.qualifier = ptbq_ind_call;
+ break;
+
+ case ptic_return:
+ bce.qualifier = ptbq_return;
+ break;
+
+ case ptic_cond_jump:
+ bce.qualifier = ptbq_cond;
+ break;
+
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ bce.qualifier = ptbq_indirect;
+ break;
+ }
+
+ /* If the block was truncated, we have to decode its last
+ * instruction each time.
+ *
+ * We could have skipped the above switch and size assignment in
+ * this case but this is already a slow and hopefully infrequent
+ * path.
+ */
+ if (block->truncated)
+ bce.qualifier = ptbq_decode;
+
+ status = pt_bcache_add(bcache, ioff, bce);
+ if (status < 0)
+ return status;
+
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ /* The next instruction's IP. */
+ nip = decoder->ip;
+ noff = pt_msec_unmap(msec, nip);
+
+ /* Even if we were able to proceed without trace, we might have to stop
+ * here for various reasons:
+ *
+ * - at near direct calls to update the return-address stack
+ *
+ * We are forced to re-decode @insn to get the branch displacement.
+ *
+ * Even though it is constant, we don't cache it to avoid increasing
+ * the size of a cache entry. Note that the displacement field is
+ * zero for this entry and we might be tempted to use it - but other
+ * entries that point to this decision point will have non-zero
+ * displacement.
+ *
+	 *     We could proceed after a near direct call but we might as well
+ * postpone it to the next iteration. Make sure to end the block if
+ * @decoder->flags.variant.block.end_on_call is set, though.
+ *
+ * - at near direct backwards jumps to detect section splits
+ *
+ * In case the current section is split underneath us, we must take
+ * care to detect that split.
+ *
+ * There is one corner case where the split is in the middle of a
+ * linear sequence of instructions that branches back into the
+ * originating section.
+ *
+ * Calls, indirect branches, and far branches are already covered
+ * since they either require trace or already require us to stop
+ * (i.e. near direct calls) for other reasons. That leaves near
+ * direct backward jumps.
+ *
+ * Instead of the decode stop at the jump instruction we're using we
+ * could have made sure that other block cache entries that extend
+ * this one insert a trampoline to the jump's entry. This would
+ * have been a bit more complicated.
+ *
+ * - if we switched sections
+ *
+ * This ends a block just like a branch that requires trace.
+ *
+ * We need to re-decode @insn in order to determine the start IP of
+ * the next block.
+ *
+ * - if the block is truncated
+ *
+ * We need to read the last instruction's memory from multiple
+ * sections and provide it to the user.
+ *
+ * We could still use the block cache but then we'd have to handle
+ * this case for each qualifier. Truncation is hopefully rare and
+ * having to read the memory for the instruction from multiple
+ * sections is already slow. Let's rather keep things simple and
+ * route it through the decode flow, where we already have
+ * everything in place.
+ */
+ switch (insn.iclass) {
+ case ptic_call:
+ return pt_blk_add_decode(bcache, ioff, insn.mode);
+
+ case ptic_jump:
+ /* An indirect branch requires trace and should have been
+ * handled above.
+ */
+ if (!iext.variant.branch.is_direct)
+ return -pte_internal;
+
+ if (iext.variant.branch.displacement < 0 ||
+ decoder->flags.variant.block.end_on_jump)
+ return pt_blk_add_decode(bcache, ioff, insn.mode);
+
+ fallthrough;
+ default:
+ if (!pt_blk_is_in_section(msec, nip) || block->truncated)
+ return pt_blk_add_decode(bcache, ioff, insn.mode);
+
+ break;
+ }
+
+ /* We proceeded one instruction. Let's see if we have a cache entry for
+ * the next instruction.
+ */
+ status = pt_bcache_lookup(&bce, bcache, noff);
+ if (status < 0)
+ return status;
+
+ /* If we don't have a valid cache entry, yet, fill the cache some more.
+ *
+ * On our way back, we add a cache entry for this instruction based on
+ * the cache entry of the succeeding instruction.
+ */
+ if (!pt_bce_is_valid(bce)) {
+ /* If we exceeded the maximum number of allowed steps, we insert
+ * a trampoline to the next instruction.
+ *
+ * The next time we encounter the same code, we will use the
+ * trampoline to jump directly to where we left off this time
+ * and continue from there.
+ */
+ steps -= 1;
+ if (!steps)
+ return pt_blk_add_trampoline(bcache, ioff, noff,
+ insn.mode);
+
+ status = pt_blk_proceed_no_event_fill_cache(decoder, block,
+ bcache, msec,
+ steps);
+ if (status < 0)
+ return status;
+
+ /* Let's see if we have more luck this time. */
+ status = pt_bcache_lookup(&bce, bcache, noff);
+ if (status < 0)
+ return status;
+
+ /* If we still don't have a valid cache entry, we're done. Most
+ * likely, @block overflowed and we couldn't proceed past the
+ * next instruction.
+ */
+ if (!pt_bce_is_valid(bce))
+ return 0;
+ }
+
+ /* We must not have switched execution modes.
+ *
+ * This would require an event and we're on the no-event flow.
+ */
+ if (pt_bce_exec_mode(bce) != insn.mode)
+ return -pte_internal;
+
+ /* The decision point IP and the displacement from @insn.ip. */
+ dip = nip + bce.displacement;
+ disp = (int64_t) (dip - insn.ip);
+
+ /* We may have switched sections if the section was split. See
+ * pt_blk_proceed_no_event_cached() for a more elaborate comment.
+ *
+ * We're not adding a block cache entry since this won't apply to the
+ * original section which may be shared with other decoders.
+ *
+ * We will instead take the slow path until the end of the section.
+ */
+ if (!pt_blk_is_in_section(msec, dip))
+ return 0;
+
+ /* Let's try to reach @nip's decision point from @insn.ip.
+ *
+ * There are two fields that may overflow: @bce.ninsn and
+ * @bce.displacement.
+ */
+ bce.ninsn += 1;
+ bce.displacement = (int32_t) disp;
+
+ /* If none of them overflowed, we're done.
+ *
+ * If one or both overflowed, let's try to insert a trampoline, i.e. we
+ * try to reach @dip via a ptbq_again entry to @nip.
+ */
+ if (!bce.ninsn || ((int64_t) bce.displacement != disp))
+ return pt_blk_add_trampoline(bcache, ioff, noff, insn.mode);
+
+ /* We're done. Add the cache entry.
+ *
+ * There's a chance that other decoders updated the cache entry in the
+	 * meantime. They should have come to the same conclusion as we did,
+ * though, and the cache entries should be identical.
+ *
+ * Cache updates are atomic so even if the two versions were not
+ * identical, we wouldn't care because they are both correct.
+ */
+ return pt_bcache_add(bcache, ioff, bce);
+}
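+
+/* Worked example (hypothetical layout): assume three consecutive one-byte
+ * instructions at section offsets 0x10, 0x11, and 0x12 where the last one is
+ * an indirect jump. Filling the cache from 0x10 recurses via 0x11 to 0x12,
+ * adds a ptbq_indirect entry with ninsn = 1 at offset 0x12, and extends it on
+ * the way back: the entry at 0x11 gets ninsn = 2 and displacement 0x1, the
+ * entry at 0x10 gets ninsn = 3 and displacement 0x2. A later lookup at 0x10
+ * thus lets pt_blk_proceed_no_event_cached() jump straight to the decision
+ * point and account for all three instructions at once.
+ */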
+
+/* Proceed at a potentially truncated instruction.
+ *
+ * We were not able to decode the instruction at @decoder->ip in @decoder's
+ * cached section. This is typically caused by not having enough bytes.
+ *
+ * Try to decode the instruction again using the entire image. If this succeeds
+ * we expect to end up with an instruction that was truncated in the section it
+ * started. We provide the full instruction in this case and end the block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_truncated(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = decoder->mode;
+ insn.ip = decoder->ip;
+
+ errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (errcode < 0)
+ return errcode;
+
+ /* We shouldn't use this function if the instruction isn't truncated. */
+ if (!insn.truncated)
+ return -pte_internal;
+
+ /* Provide the instruction in the block. This ends the block. */
+ memcpy(block->raw, insn.raw, insn.size);
+ block->iclass = insn.iclass;
+ block->size = insn.size;
+ block->truncated = 1;
+
+ /* Log calls' return addresses for return compression. */
+ errcode = pt_blk_log_call(decoder, &insn, &iext);
+ if (errcode < 0)
+ return errcode;
+
+ /* Let's see if we can proceed to the next IP without trace.
+ *
+ * The truncated instruction ends the block but we still need to get the
+ * next block's start IP.
+ */
+ errcode = pt_insn_next_ip(&decoder->ip, &insn, &iext);
+ if (errcode < 0) {
+ if (errcode != -pte_bad_query)
+ return errcode;
+
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ return 0;
+}
+
+/* Proceed to the next decision point using the block cache.
+ *
+ * Tracing is enabled and we don't have an event pending. We already set
+ * @block's isid. All reads are done within @msec as we're not switching
+ * sections between blocks.
+ *
+ * Proceed as far as we get without trace. Stop when we either:
+ *
+ * - need trace in order to continue
+ * - overflow the max number of instructions in a block
+ *
+ * We actually proceed one instruction further to get the start IP for the next
+ * block. This only updates @decoder's internal state, though.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_no_event_cached(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_block_cache *bcache,
+ const struct pt_mapped_section *msec)
+{
+ struct pt_bcache_entry bce;
+ uint16_t binsn, ninsn;
+ uint64_t offset, nip;
+ int status;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ offset = pt_msec_unmap(msec, decoder->ip);
+ status = pt_bcache_lookup(&bce, bcache, offset);
+ if (status < 0)
+ return status;
+
+ /* If we don't find a valid cache entry, fill the cache. */
+ if (!pt_bce_is_valid(bce))
+ return pt_blk_proceed_no_event_fill_cache(decoder, block,
+ bcache, msec,
+ bcache_fill_steps);
+
+	/* If we switched sections, the original section must have been split
+ * underneath us. A split preserves the block cache of the original
+ * section.
+ *
+ * Crossing sections requires ending the block so we can indicate the
+ * proper isid for the entire block.
+ *
+ * Plus there's the chance that the new section that caused the original
+ * section to split changed instructions.
+ *
+ * This check will also cover changes to a linear sequence of code we
+ * would otherwise have jumped over as long as the start and end are in
+ * different sub-sections.
+ *
+ * Since we stop on every (backwards) branch (through an artificial stop
+ * in the case of a near direct backward branch) we will detect all
+ * section splits.
+ *
+ * Switch to the slow path until we reach the end of this section.
+ */
+ nip = decoder->ip + bce.displacement;
+ if (!pt_blk_is_in_section(msec, nip))
+ return pt_blk_proceed_no_event_uncached(decoder, block);
+
+ /* We have a valid cache entry. Let's first check if the way to the
+ * decision point still fits into @block.
+ *
+ * If it doesn't, we end the block without filling it as much as we
+ * could since this would require us to switch to the slow path.
+ *
+ * On the next iteration, we will start with an empty block, which is
+ * guaranteed to have enough room for at least one block cache entry.
+ */
+ binsn = block->ninsn;
+ ninsn = binsn + (uint16_t) bce.ninsn;
+ if (ninsn < binsn)
+ return 0;
+
+ /* Jump ahead to the decision point and proceed from there.
+ *
+ * We're not switching execution modes so even if @block already has an
+ * execution mode, it will be the one we're going to set.
+ */
+ decoder->ip = nip;
+
+ /* We don't know the instruction class so we should be setting it to
+ * ptic_error. Since we will be able to fill it back in later in most
+ * cases, we move the clearing to the switch cases that don't.
+ */
+ block->end_ip = nip;
+ block->ninsn = ninsn;
+ block->mode = pt_bce_exec_mode(bce);
+
+ switch (pt_bce_qualifier(bce)) {
+ case ptbq_again:
+ /* We're not able to reach the actual decision point due to
+ * overflows so we inserted a trampoline.
+ *
+ * We don't know the instruction and it is not guaranteed that
+ * we will proceed further (e.g. if @block overflowed). Let's
+ * clear any previously stored instruction class which has
+ * become invalid when we updated @block->ninsn.
+ */
+ block->iclass = ptic_error;
+
+ return pt_blk_proceed_no_event_cached(decoder, block, bcache,
+ msec);
+
+ case ptbq_cond:
+ /* We're at a conditional branch. */
+ block->iclass = ptic_cond_jump;
+
+ /* Let's first check whether we know the size of the
+ * instruction. If we do, we might get away without decoding
+ * the instruction.
+ *
+ * If we don't know the size we might as well do the full decode
+ * and proceed-with-trace flow we do for ptbq_decode.
+ */
+ if (bce.isize) {
+ uint64_t ip;
+ int taken;
+
+ /* If the branch is not taken, we don't need to decode
+ * the instruction at @decoder->ip.
+ *
+ * If it is taken, we have to implement everything here.
+ * We can't use the normal decode and proceed-with-trace
+ * flow since we already consumed the TNT bit.
+ */
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+
+ ip = decoder->ip;
+ if (taken) {
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = pt_bce_exec_mode(bce);
+ insn.ip = ip;
+
+ status = pt_blk_decode_in_section(&insn, &iext,
+ msec);
+ if (status < 0)
+ return status;
+
+ ip += iext.variant.branch.displacement;
+ }
+
+ decoder->ip = ip + bce.isize;
+ break;
+ }
+
+ fallthrough;
+ case ptbq_decode: {
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ /* We need to decode the instruction at @decoder->ip and decide
+ * what to do based on that.
+ *
+ * We already accounted for the instruction so we can't just
+ * call pt_blk_proceed_one_insn().
+ */
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = pt_bce_exec_mode(bce);
+ insn.ip = decoder->ip;
+
+ status = pt_blk_decode_in_section(&insn, &iext, msec);
+ if (status < 0) {
+ if (status != -pte_bad_insn)
+ return status;
+
+ return pt_blk_proceed_truncated(decoder, block);
+ }
+
+ /* We just decoded @insn so we know the instruction class. */
+ block->iclass = insn.iclass;
+
+ /* Log calls' return addresses for return compression. */
+ status = pt_blk_log_call(decoder, &insn, &iext);
+ if (status < 0)
+ return status;
+
+ /* Let's see if we can proceed to the next IP without trace.
+ *
+ * Note that we also stop due to displacement overflows or to
+ * maintain the return-address stack for near direct calls.
+ */
+ status = pt_insn_next_ip(&decoder->ip, &insn, &iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ /* We can't, so let's proceed with trace, which
+ * completes the block.
+ */
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ /* End the block if the user asked us to.
+ *
+ * We only need to take care about direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn.iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn.iclass == ptic_jump)))
+ break;
+
+ /* If we can proceed without trace and we stay in @msec we may
+ * proceed further.
+ *
+ * We're done if we switch sections, though.
+ */
+ if (!pt_blk_is_in_section(msec, decoder->ip))
+ break;
+
+ return pt_blk_proceed_no_event_cached(decoder, block, bcache,
+ msec);
+ }
+
+ case ptbq_ind_call: {
+ uint64_t ip;
+
+ /* We're at a near indirect call. */
+ block->iclass = ptic_call;
+
+ /* We need to update the return-address stack and query the
+ * destination IP.
+ */
+ ip = decoder->ip;
+
+ /* If we already know the size of the instruction, we don't need
+ * to re-decode it.
+ */
+ if (bce.isize)
+ ip += bce.isize;
+ else {
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = pt_bce_exec_mode(bce);
+ insn.ip = ip;
+
+ status = pt_blk_decode_in_section(&insn, &iext, msec);
+ if (status < 0)
+ return status;
+
+ ip += insn.size;
+ }
+
+ status = pt_retstack_push(&decoder->retstack, ip);
+ if (status < 0)
+ return status;
+
+ status = pt_blk_indirect_branch(decoder, &decoder->ip);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+ break;
+ }
+
+ case ptbq_return: {
+ int taken;
+
+ /* We're at a near return. */
+ block->iclass = ptic_return;
+
+ /* Check for a compressed return. */
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ /* The return is not compressed. We need another query
+ * to determine the destination IP.
+ */
+ status = pt_blk_indirect_branch(decoder, &decoder->ip);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+ break;
+ }
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+
+ /* A compressed return is indicated by a taken conditional
+ * branch.
+ */
+ if (!taken)
+ return -pte_bad_retcomp;
+
+ return pt_retstack_pop(&decoder->retstack, &decoder->ip);
+ }
+
+ case ptbq_indirect:
+ /* We're at an indirect jump or far transfer.
+ *
+ * We don't know the exact instruction class and there's no
+ * reason to decode the instruction for any other purpose.
+ *
+ * Indicate that we don't know the instruction class and leave
+ * it to our caller to decode the instruction if needed.
+ */
+ block->iclass = ptic_error;
+
+ /* This is neither a near call nor return so we don't need to
+ * touch the return-address stack.
+ *
+ * Just query the destination IP.
+ */
+ status = pt_blk_indirect_branch(decoder, &decoder->ip);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+ break;
+ }
+
+ return 0;
+}
+
+static int pt_blk_msec_fill(struct pt_block_decoder *decoder,
+ const struct pt_mapped_section **pmsec)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_section *section;
+ int isid, errcode;
+
+ if (!decoder || !pmsec)
+ return -pte_internal;
+
+ isid = pt_msec_cache_fill(&decoder->scache, &msec, decoder->image,
+ &decoder->asid, decoder->ip);
+ if (isid < 0)
+ return isid;
+
+ section = pt_msec_section(msec);
+ if (!section)
+ return -pte_internal;
+
+ *pmsec = msec;
+
+ errcode = pt_section_request_bcache(section);
+ if (errcode < 0)
+ return errcode;
+
+ return isid;
+}
+
+static inline int pt_blk_msec_lookup(struct pt_block_decoder *decoder,
+ const struct pt_mapped_section **pmsec)
+{
+ int isid;
+
+ if (!decoder)
+ return -pte_internal;
+
+ isid = pt_msec_cache_read(&decoder->scache, pmsec, decoder->image,
+ decoder->ip);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ return pt_blk_msec_fill(decoder, pmsec);
+ }
+
+ return isid;
+}
+
+/* Proceed to the next decision point - try using the cache.
+ *
+ * Tracing is enabled and we don't have an event pending. Proceed as far as
+ * we get without trace. Stop when we either:
+ *
+ * - need trace in order to continue
+ * - overflow the max number of instructions in a block
+ *
+ * We actually proceed one instruction further to get the start IP for the next
+ * block. This only updates @decoder's internal state, though.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_no_event(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_block_cache *bcache;
+ struct pt_section *section;
+ int isid;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ isid = pt_blk_msec_lookup(decoder, &msec);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ /* Even if there is no such section in the image, we may still
+ * read the memory via the callback function.
+ */
+ return pt_blk_proceed_no_event_uncached(decoder, block);
+ }
+
+ /* We do not switch sections inside a block. */
+ if (isid != block->isid) {
+ if (!pt_blk_block_is_empty(block))
+ return 0;
+
+ block->isid = isid;
+ }
+
+ section = pt_msec_section(msec);
+ if (!section)
+ return -pte_internal;
+
+ bcache = pt_section_bcache(section);
+ if (!bcache)
+ return pt_blk_proceed_no_event_uncached(decoder, block);
+
+ return pt_blk_proceed_no_event_cached(decoder, block, bcache, msec);
+}
+
+/* Proceed to the next event or decision point.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_blk_proceed(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ int status;
+
+ status = pt_blk_fetch_event(decoder);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ return pt_blk_proceed_event(decoder, block);
+ }
+
+ /* If tracing is disabled we should either be out of trace or we should
+ * have taken the event flow above.
+ */
+ if (!decoder->enabled) {
+ if (decoder->status & pts_eos)
+ return -pte_eos;
+
+ return -pte_no_enable;
+ }
+
+ status = pt_blk_proceed_no_event(decoder, block);
+ if (status < 0)
+ return status;
+
+ return pt_blk_proceed_trailing_event(decoder, block);
+}
+
+enum {
+ /* The maximum number of steps to take when determining whether the
+ * event location can be reached.
+ */
+ bdm64_max_steps = 0x100
+};
+
+/* Try to work around erratum BDM64.
+ *
+ * If we got a transaction abort immediately following a branch that produced
+ * trace, the trace for that branch might have been corrupted.
+ *
+ * Returns a positive integer if the erratum was handled.
+ * Returns zero if the erratum does not seem to apply.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_handle_erratum_bdm64(struct pt_block_decoder *decoder,
+ const struct pt_block *block,
+ const struct pt_event *ev)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int status;
+
+ if (!decoder || !block || !ev)
+ return -pte_internal;
+
+ /* This only affects aborts. */
+ if (!ev->variant.tsx.aborted)
+ return 0;
+
+ /* This only affects branches that require trace.
+ *
+ * If the erratum hits, that branch ended the current block and brought
+ * us to the trailing event flow.
+ */
+ if (pt_blk_block_is_empty(block))
+ return 0;
+
+ insn.mode = block->mode;
+ insn.ip = block->end_ip;
+
+ status = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (status < 0)
+ return 0;
+
+ if (!pt_insn_is_branch(&insn, &iext))
+ return 0;
+
+ /* Let's check if we can reach the event location from here.
+ *
+ * If we can, let's assume the erratum did not hit. We might still be
+ * wrong but we're not able to tell.
+ */
+ status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip,
+ decoder->mode, decoder->image,
+ &decoder->asid, bdm64_max_steps);
+ if (status > 0)
+ return status;
+
+ /* We can't reach the event location. This could either mean that we
+ * stopped too early (and status is zero) or that the erratum hit.
+ *
+ * We assume the latter and pretend that the previous branch brought us
+ * to the event location, instead.
+ */
+ decoder->ip = ev->variant.tsx.ip;
+
+ return 1;
+}
+
+/* Check whether a trailing TSX event should be postponed.
+ *
+ * This involves handling erratum BDM64.
+ *
+ * Returns a positive integer if the event is to be postponed.
+ * Returns zero if the event should be processed.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_blk_postpone_trailing_tsx(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ if (ev->ip_suppressed)
+ return 0;
+
+ if (block && decoder->query.config.errata.bdm64) {
+ status = pt_blk_handle_erratum_bdm64(decoder, block, ev);
+ if (status < 0)
+ return 1;
+ }
+
+ if (decoder->ip != ev->variant.tsx.ip)
+ return 1;
+
+ return 0;
+}
+
+/* Proceed with events that bind to the current decoder IP.
+ *
+ * This function is used in the following scenarios:
+ *
+ * - we just synchronized onto the trace stream
+ * - we ended a block and proceeded to the next IP
+ * - we processed an event that was indicated by this function
+ *
+ * Check if there is an event at the current IP that needs to be indicated to
+ * the user.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_blk_proceed_trailing_event(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_blk_fetch_event(decoder);
+ if (status <= 0) {
+ if (status < 0)
+ return status;
+
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, 0);
+ }
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_disabled:
+ /* Synchronous disable events are normally indicated on the
+ * event flow.
+ */
+ if (!decoder->process_insn)
+ break;
+
+ /* A sync disable may bind to a CR3 changing instruction. */
+ if (ev->ip_suppressed &&
+ pt_insn_changes_cr3(&decoder->insn, &decoder->iext))
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Or it binds to the next branch that would require trace.
+ *
+ * Try to complete processing the current instruction by
+ * proceeding past it. If that fails because it would require
+ * trace, we can apply the disabled event.
+ */
+ status = pt_insn_next_ip(&decoder->ip, &decoder->insn,
+ &decoder->iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ status = pt_blk_set_disable_resume_ip(decoder,
+ &decoder->insn);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+ }
+
+ /* We proceeded past the current instruction. */
+ status = pt_blk_clear_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ /* This might have brought us to the disable IP. */
+ if (!ev->ip_suppressed &&
+ decoder->ip == ev->variant.disabled.ip)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ break;
+
+ case ptev_enabled:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_disabled:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (decoder->ip != ev->variant.async_disabled.at)
+ break;
+
+ if (decoder->query.config.errata.skd022) {
+ status = pt_blk_handle_erratum_skd022(decoder, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ /* If the erratum applies, the event is modified
+ * to a synchronous disable event that will be
+ * processed on the next pt_blk_proceed_event()
+ * call. We're done.
+ */
+ break;
+ }
+ }
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_branch:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (decoder->ip != ev->variant.async_branch.from)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_paging:
+ /* We apply the event immediately if we're not tracing. */
+ if (!decoder->enabled)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Synchronous paging events are normally indicated on the event
+ * flow, unless they bind to the same instruction as a previous
+ * event.
+ *
+ * We bind at most one paging event to an instruction, though.
+ */
+ if (!decoder->process_insn || decoder->bound_paging)
+ break;
+
+ /* We're done if we're not binding to the currently postponed
+ * instruction. We will process the event on the normal event
+ * flow in the next iteration.
+ */
+ if (!pt_insn_binds_to_pip(&decoder->insn, &decoder->iext))
+ break;
+
+ /* We bound a paging event. Make sure we do not bind further
+ * paging events to this instruction.
+ */
+ decoder->bound_paging = 1;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_paging:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_paging.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_vmcs:
+ /* We apply the event immediately if we're not tracing. */
+ if (!decoder->enabled)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Synchronous vmcs events are normally indicated on the event
+ * flow, unless they bind to the same instruction as a previous
+ * event.
+ *
+ * We bind at most one vmcs event to an instruction, though.
+ */
+ if (!decoder->process_insn || decoder->bound_vmcs)
+ break;
+
+ /* We're done if we're not binding to the currently postponed
+ * instruction. We will process the event on the normal event
+ * flow in the next iteration.
+ */
+ if (!pt_insn_binds_to_vmcs(&decoder->insn, &decoder->iext))
+ break;
+
+ /* We bound a vmcs event. Make sure we do not bind further vmcs
+ * events to this instruction.
+ */
+ decoder->bound_vmcs = 1;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_vmcs:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_vmcs.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_overflow:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_exec_mode:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.exec_mode.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_tsx:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ status = pt_blk_postpone_trailing_tsx(decoder, block, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ break;
+ }
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_stop:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_exstop:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_mwait:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_ptwrite:
+ /* We apply the event immediately if we're not tracing. */
+ if (!decoder->enabled)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Ptwrite events are normally indicated on the event flow,
+ * unless they bind to the same instruction as a previous event.
+ *
+ * We bind at most one ptwrite event to an instruction, though.
+ */
+ if (!decoder->process_insn || decoder->bound_ptwrite)
+ break;
+
+ /* We're done if we're not binding to the currently postponed
+ * instruction. We will process the event on the normal event
+ * flow in the next iteration.
+ */
+ if (!ev->ip_suppressed ||
+ !pt_insn_is_ptwrite(&decoder->insn, &decoder->iext))
+ break;
+
+ /* We bound a ptwrite event. Make sure we do not bind further
+ * ptwrite events to this instruction.
+ */
+ decoder->bound_ptwrite = 1;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+ }
+
+ /* No further events. Proceed past any postponed instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, 0);
+}
+
+int pt_blk_next(struct pt_block_decoder *decoder, struct pt_block *ublock,
+ size_t size)
+{
+ struct pt_block block, *pblock;
+ int errcode, status;
+
+ if (!decoder || !ublock)
+ return -pte_invalid;
+
+ pblock = size == sizeof(block) ? ublock : &block;
+
+ /* Zero-initialize the block in case of error returns. */
+ memset(pblock, 0, sizeof(*pblock));
+
+ /* Fill in a few things from the current decode state.
+ *
+ * This reflects the state of the last pt_blk_next() or pt_blk_start()
+ * call. Note that, unless we stop with tracing disabled, we have already
+ * proceeded to the start IP of the next block.
+ *
+ * Some of the state may later be overwritten as we process events.
+ */
+ pblock->ip = decoder->ip;
+ pblock->mode = decoder->mode;
+ if (decoder->speculative)
+ pblock->speculative = 1;
+
+ /* Proceed one block. */
+ status = pt_blk_proceed(decoder, pblock);
+
+ errcode = block_to_user(ublock, size, pblock);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
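+/* A minimal usage sketch (not part of this file) for the public block
+ * decode API, assuming @decoder was allocated with pt_blk_alloc_decoder()
+ * and already synchronized with pt_blk_sync_forward():
+ *
+ *    struct pt_block block;
+ *    int status;
+ *
+ *    for (;;) {
+ *        status = pt_blk_next(decoder, &block, sizeof(block));
+ *        if (status < 0)
+ *            break;    (-pte_eos marks the end of the trace)
+ *
+ *        (consume block.ip, block.end_ip, ...)
+ *
+ *        (drain pending events; see the sketch after pt_blk_event() below)
+ *    }
+ *
+ * If the returned status has pts_event_pending set, events must be drained
+ * with pt_blk_event() before calling pt_blk_next() again.
+ */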
+
+/* Process an enabled event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_enabled(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must have an IP in order to start decoding. */
+ if (ev->ip_suppressed)
+ return -pte_noip;
+
+ /* We must currently be disabled. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->ip = ev->variant.enabled.ip;
+ decoder->enabled = 1;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a disabled event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_disabled(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must currently be enabled. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ /* We preserve @decoder->ip. This is where we expect tracing to resume
+ * and we'll indicate that on the subsequent enabled event if tracing
+ * actually does resume from there.
+ */
+ decoder->enabled = 0;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process an asynchronous branch event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_async_branch(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must currently be enabled. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ /* Jump to the branch destination. We will continue from there in the
+ * next iteration.
+ */
+ decoder->ip = ev->variant.async_branch.to;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a paging event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_paging(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ uint64_t cr3;
+ int errcode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ cr3 = ev->variant.paging.cr3;
+ if (decoder->asid.cr3 != cr3) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.cr3 = cr3;
+ }
+
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a vmcs event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_vmcs(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ uint64_t vmcs;
+ int errcode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ vmcs = ev->variant.vmcs.base;
+ if (decoder->asid.vmcs != vmcs) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.vmcs = vmcs;
+ }
+
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process an overflow event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_overflow(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* If the IP is suppressed, the overflow resolved while tracing was
+ * disabled. Otherwise it resolved while tracing was enabled.
+ */
+ if (ev->ip_suppressed) {
+ /* Tracing is disabled. Preserving the previous IP would only be
+ * misleading. Even if tracing had already been disabled before, we
+ * might have missed the re-enable during the overflow.
+ */
+ decoder->enabled = 0;
+ decoder->ip = 0ull;
+ } else {
+ /* Tracing is enabled and we're at the IP at which the overflow
+ * resolved.
+ */
+ decoder->enabled = 1;
+ decoder->ip = ev->variant.overflow.ip;
+ }
+
+ /* We don't know the TSX state. Let's assume we execute normally.
+ *
+ * We also don't know the execution mode. Let's keep what we have
+ * in case we don't get an update before we have to decode the next
+ * instruction.
+ */
+ decoder->speculative = 0;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process an exec mode event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_exec_mode(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ enum pt_exec_mode mode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Use status update events to diagnose inconsistencies. */
+ mode = ev->variant.exec_mode.mode;
+ if (ev->status_update && decoder->enabled &&
+ decoder->mode != ptem_unknown && decoder->mode != mode)
+ return -pte_bad_status_update;
+
+ decoder->mode = mode;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a tsx event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_tsx(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ decoder->speculative = ev->variant.tsx.speculative;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a stop event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_stop(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* Tracing is always disabled before it is stopped. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+int pt_blk_event(struct pt_block_decoder *decoder, struct pt_event *uevent,
+ size_t size)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder || !uevent)
+ return -pte_invalid;
+
+ /* We must currently process an event. */
+ if (!decoder->process_event)
+ return -pte_bad_query;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_enabled:
+ /* Indicate that tracing resumes from the IP at which tracing
+ * had been disabled before (with some special treatment for
+ * calls).
+ */
+ if (ev->variant.enabled.ip == decoder->ip)
+ ev->variant.enabled.resumed = 1;
+
+ status = pt_blk_process_enabled(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_disabled:
+ if (decoder->ip != ev->variant.async_disabled.at)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_disabled:
+
+ status = pt_blk_process_disabled(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_branch:
+ if (decoder->ip != ev->variant.async_branch.from)
+ return -pte_bad_query;
+
+ status = pt_blk_process_async_branch(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_paging:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_paging.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_paging:
+ status = pt_blk_process_paging(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_vmcs:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_vmcs.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_vmcs:
+ status = pt_blk_process_vmcs(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_overflow:
+ status = pt_blk_process_overflow(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exec_mode:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.exec_mode.ip)
+ return -pte_bad_query;
+
+ status = pt_blk_process_exec_mode(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_tsx:
+ if (!ev->ip_suppressed && decoder->ip != ev->variant.tsx.ip)
+ return -pte_bad_query;
+
+ status = pt_blk_process_tsx(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_stop:
+ status = pt_blk_process_stop(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exstop:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ return -pte_bad_query;
+
+ decoder->process_event = 0;
+ break;
+
+ case ptev_mwait:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ return -pte_bad_query;
+
+ decoder->process_event = 0;
+ break;
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ case ptev_ptwrite:
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ decoder->process_event = 0;
+ break;
+ }
+
+ /* Copy the event to the user. Make sure we're not writing beyond the
+ * memory provided by the user.
+ *
+ * We might truncate details of an event, but only details the user
+ * cannot know about anyway.
+ */
+ if (sizeof(*ev) < size)
+ size = sizeof(*ev);
+
+ memcpy(uevent, ev, size);
+
+ /* Indicate further events. */
+ return pt_blk_proceed_trailing_event(decoder, NULL);
+}
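+/* A sketch (not part of this file) of draining pending events, assuming
+ * @status holds the non-negative return value of the preceding
+ * pt_blk_next() call:
+ *
+ *    struct pt_event ev;
+ *
+ *    while (status & pts_event_pending) {
+ *        status = pt_blk_event(decoder, &ev, sizeof(ev));
+ *        if (status < 0)
+ *            break;
+ *
+ *        (inspect ev.type, e.g. ptev_enabled, ptev_disabled, ...)
+ *    }
+ *
+ * Note that pt_blk_event() itself returns a pt_status_flag bit-vector, so
+ * pts_event_pending stays set for as long as further events are pending at
+ * the current decode position.
+ */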
diff --git a/libipt/src/pt_config.c b/libipt/src/pt_config.c
new file mode 100644
index 000000000000..1479daebd556
--- /dev/null
+++ b/libipt/src/pt_config.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stddef.h>
+
+
+int pt_cpu_errata(struct pt_errata *errata, const struct pt_cpu *cpu)
+{
+ if (!errata || !cpu)
+ return -pte_invalid;
+
+ memset(errata, 0, sizeof(*errata));
+
+ /* We don't know about others. */
+ if (cpu->vendor != pcv_intel)
+ return -pte_bad_cpu;
+
+ switch (cpu->family) {
+ case 0x6:
+ switch (cpu->model) {
+ case 0x3d:
+ case 0x47:
+ case 0x4f:
+ case 0x56:
+ errata->bdm70 = 1;
+ errata->bdm64 = 1;
+ return 0;
+
+ case 0x4e:
+ case 0x5e:
+ errata->bdm70 = 1;
+ errata->skd007 = 1;
+ errata->skd022 = 1;
+ errata->skd010 = 1;
+ errata->skl014 = 1;
+ return 0;
+
+ case 0x8e:
+ case 0x9e:
+ errata->bdm70 = 1;
+ errata->skl014 = 1;
+ errata->skd022 = 1;
+ errata->skd010 = 1;
+ errata->skd007 = 1;
+ return 0;
+
+ case 0x5c:
+ case 0x5f:
+ errata->apl12 = 1;
+ errata->apl11 = 1;
+ return 0;
+ }
+ break;
+ }
+
+ return -pte_bad_cpu;
+}
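+/* An illustrative sketch (not part of this file) of the usual caller side:
+ * the CPU identity is read first and the errata workarounds are derived
+ * from it before a decoder is allocated.  @begin and @end are hypothetical
+ * pointers delimiting the caller's Intel PT buffer.
+ *
+ *    struct pt_config config;
+ *    int errcode;
+ *
+ *    memset(&config, 0, sizeof(config));
+ *    config.size = sizeof(config);
+ *    config.begin = begin;
+ *    config.end = end;
+ *
+ *    errcode = pt_cpu_read(&config.cpu);
+ *    if (errcode >= 0)
+ *        errcode = pt_cpu_errata(&config.errata, &config.cpu);
+ *
+ * For example, a Skylake client part (family 6, model 0x4e) enables bdm70,
+ * skd007, skd010, skd022, and skl014 above.
+ */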
+
+int pt_config_from_user(struct pt_config *config,
+ const struct pt_config *uconfig)
+{
+ uint8_t *begin, *end;
+ size_t size;
+
+ if (!config)
+ return -pte_internal;
+
+ if (!uconfig)
+ return -pte_invalid;
+
+ size = uconfig->size;
+ if (size < offsetof(struct pt_config, decode))
+ return -pte_bad_config;
+
+ begin = uconfig->begin;
+ end = uconfig->end;
+
+ if (!begin || !end || end < begin)
+ return -pte_bad_config;
+
+ /* Ignore fields in the user's configuration that we don't know; zero out
+ * fields the user didn't know about.
+ */
+ if (sizeof(*config) <= size)
+ size = sizeof(*config);
+ else
+ memset(((uint8_t *) config) + size, 0, sizeof(*config) - size);
+
+ /* Copy (portions of) the user's configuration. */
+ memcpy(config, uconfig, size);
+
+ /* We copied the user's size - fix it. */
+ config->size = size;
+
+ return 0;
+}
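+/* For example (illustrative only): a caller built against an older library
+ * version may pass a smaller @uconfig->size; the fields of @config beyond
+ * that size are zeroed and only the user's portion is copied.  A caller
+ * passing a larger size than we know has the excess ignored, and
+ * @config->size is set to the number of bytes that were actually copied.
+ */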
+
+/* The maximum number of filter addresses that fit into the configuration. */
+static inline size_t pt_filter_addr_ncfg(void)
+{
+ return (sizeof(struct pt_conf_addr_filter) -
+ offsetof(struct pt_conf_addr_filter, addr0_a)) /
+ (2 * sizeof(uint64_t));
+}
+
+uint32_t pt_filter_addr_cfg(const struct pt_conf_addr_filter *filter, uint8_t n)
+{
+ if (!filter)
+ return 0u;
+
+ if (pt_filter_addr_ncfg() <= n)
+ return 0u;
+
+ return (filter->config.addr_cfg >> (4 * n)) & 0xf;
+}
+
+uint64_t pt_filter_addr_a(const struct pt_conf_addr_filter *filter, uint8_t n)
+{
+ const uint64_t *addr;
+
+ if (!filter)
+ return 0ull;
+
+ if (pt_filter_addr_ncfg() <= n)
+ return 0ull;
+
+ addr = &filter->addr0_a;
+ return addr[2 * n];
+}
+
+uint64_t pt_filter_addr_b(const struct pt_conf_addr_filter *filter, uint8_t n)
+{
+ const uint64_t *addr;
+
+ if (!filter)
+ return 0ull;
+
+ if (pt_filter_addr_ncfg() <= n)
+ return 0ull;
+
+ addr = &filter->addr0_a;
+ return addr[(2 * n) + 1];
+}
+
+static int pt_filter_check_cfg_filter(const struct pt_conf_addr_filter *filter,
+ uint64_t addr)
+{
+ uint8_t n;
+
+ if (!filter)
+ return -pte_internal;
+
+ for (n = 0; n < pt_filter_addr_ncfg(); ++n) {
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(filter, n);
+ if (addr_cfg != pt_addr_cfg_filter)
+ continue;
+
+ addr_a = pt_filter_addr_a(filter, n);
+ addr_b = pt_filter_addr_b(filter, n);
+
+ /* Note that both A and B are inclusive. */
+ if ((addr_a <= addr) && (addr <= addr_b))
+ return 1;
+ }
+
+ /* No filter hit. If we have at least one FilterEn filter, this means
+ * that tracing is disabled; otherwise, tracing is enabled.
+ */
+ for (n = 0; n < pt_filter_addr_ncfg(); ++n) {
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(filter, n);
+ if (addr_cfg == pt_addr_cfg_filter)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int pt_filter_check_cfg_stop(const struct pt_conf_addr_filter *filter,
+ uint64_t addr)
+{
+ uint8_t n;
+
+ if (!filter)
+ return -pte_internal;
+
+ for (n = 0; n < pt_filter_addr_ncfg(); ++n) {
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(filter, n);
+ if (addr_cfg != pt_addr_cfg_stop)
+ continue;
+
+ addr_a = pt_filter_addr_a(filter, n);
+ addr_b = pt_filter_addr_b(filter, n);
+
+ /* Note that both A and B are inclusive. */
+ if ((addr_a <= addr) && (addr <= addr_b))
+ return 0;
+ }
+
+ return 1;
+}
+
+int pt_filter_addr_check(const struct pt_conf_addr_filter *filter,
+ uint64_t addr)
+{
+ int status;
+
+ status = pt_filter_check_cfg_stop(filter, addr);
+ if (status <= 0)
+ return status;
+
+ return pt_filter_check_cfg_filter(filter, addr);
+}
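+/* A worked example (illustrative only): with a single FilterEn range
+ * [0x1000, 0x1fff] and a single TraceStop range [0x1800, 0x18ff],
+ * pt_filter_addr_check() returns
+ *
+ *    0 for 0x1850    (inside the stop range)
+ *    1 for 0x1200    (inside the filter range, outside the stop range)
+ *    0 for 0x3000    (outside every filter range while one is configured)
+ *
+ * Without any FilterEn range configured, any address outside all stop
+ * ranges yields 1, i.e. tracing is considered enabled everywhere else.
+ */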
diff --git a/libipt/src/pt_cpu.c b/libipt/src/pt_cpu.c
new file mode 100644
index 000000000000..c47e54d40cf6
--- /dev/null
+++ b/libipt/src/pt_cpu.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpu.h"
+#include "pt_cpuid.h"
+
+#include "intel-pt.h"
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+static const char * const cpu_vendors[] = {
+ "",
+ "GenuineIntel"
+};
+
+enum {
+ pt_cpuid_vendor_size = 12
+};
+
+union cpu_vendor {
+ /* The raw data returned from cpuid. */
+ struct {
+ uint32_t ebx;
+ uint32_t edx;
+ uint32_t ecx;
+ } cpuid;
+
+ /* The resulting vendor string. */
+ char vendor_string[pt_cpuid_vendor_size];
+};
+
+static enum pt_cpu_vendor cpu_vendor(void)
+{
+ union cpu_vendor vendor;
+ uint32_t eax;
+ size_t i;
+
+ memset(&vendor, 0, sizeof(vendor));
+ eax = 0;
+
+ pt_cpuid(0u, &eax, &vendor.cpuid.ebx, &vendor.cpuid.ecx,
+ &vendor.cpuid.edx);
+
+ for (i = 0; i < sizeof(cpu_vendors)/sizeof(*cpu_vendors); i++)
+ if (strncmp(vendor.vendor_string,
+ cpu_vendors[i], pt_cpuid_vendor_size) == 0)
+ return (enum pt_cpu_vendor) i;
+
+ return pcv_unknown;
+}
+
+static uint32_t cpu_info(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ eax = 0;
+ ebx = 0;
+ ecx = 0;
+ edx = 0;
+ pt_cpuid(1u, &eax, &ebx, &ecx, &edx);
+
+ return eax;
+}
+
+int pt_cpu_parse(struct pt_cpu *cpu, const char *s)
+{
+ const char sep = '/';
+ char *endptr;
+ long family, model, stepping;
+
+ if (!cpu || !s)
+ return -pte_invalid;
+
+ family = strtol(s, &endptr, 0);
+ if (s == endptr || *endptr == '\0' || *endptr != sep)
+ return -pte_invalid;
+
+ if (family < 0 || family > USHRT_MAX)
+ return -pte_invalid;
+
+ /* skip separator */
+ s = endptr + 1;
+
+ model = strtol(s, &endptr, 0);
+ if (s == endptr || (*endptr != '\0' && *endptr != sep))
+ return -pte_invalid;
+
+ if (model < 0 || model > UCHAR_MAX)
+ return -pte_invalid;
+
+ if (*endptr == '\0')
+ /* stepping was omitted; it defaults to 0 */
+ stepping = 0;
+ else {
+ /* skip separator */
+ s = endptr + 1;
+
+ stepping = strtol(s, &endptr, 0);
+ if (*endptr != '\0')
+ return -pte_invalid;
+
+ if (stepping < 0 || stepping > UCHAR_MAX)
+ return -pte_invalid;
+ }
+
+ cpu->vendor = pcv_intel;
+ cpu->family = (uint16_t) family;
+ cpu->model = (uint8_t) model;
+ cpu->stepping = (uint8_t) stepping;
+
+ return 0;
+}
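+/* For example (illustrative only), since strtol() is called with base 0,
+ * both decimal and hexadecimal components are accepted:
+ *
+ *    pt_cpu_parse(&cpu, "6/78/3")   -> family 6, model 78, stepping 3
+ *    pt_cpu_parse(&cpu, "6/0x4e")   -> family 6, model 0x4e, stepping 0
+ *
+ * The vendor is always set to pcv_intel; the stepping may be omitted.
+ */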
+
+int pt_cpu_read(struct pt_cpu *cpu)
+{
+ uint32_t info;
+ uint16_t family;
+
+ if (!cpu)
+ return -pte_invalid;
+
+ cpu->vendor = cpu_vendor();
+
+ info = cpu_info();
+
+ cpu->family = family = (info>>8) & 0xf;
+ if (family == 0xf)
+ cpu->family += (info>>20) & 0xf;
+
+ cpu->model = (info>>4) & 0xf;
+ if (family == 0x6 || family == 0xf)
+ cpu->model += (info>>12) & 0xf0;
+
+ cpu->stepping = (info>>0) & 0xf;
+
+ return 0;
+}
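+/* A worked example (illustrative only): for CPUID leaf 1 returning
+ * eax = 0x000506e3 (a Skylake desktop part), the code above yields
+ *
+ *    family   = (info >> 8) & 0xf                           = 0x6
+ *    model    = ((info >> 4) & 0xf) + ((info >> 12) & 0xf0) = 0xe + 0x50
+ *                                                           = 0x5e
+ *    stepping = info & 0xf                                  = 0x3
+ *
+ * The extended family bits are only added for family 0xf and the extended
+ * model bits only for families 0x6 and 0xf, matching the DisplayFamily/
+ * DisplayModel algorithm in the SDM.
+ */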
diff --git a/libipt/src/pt_decoder_function.c b/libipt/src/pt_decoder_function.c
new file mode 100644
index 000000000000..4c7d48e1c68c
--- /dev/null
+++ b/libipt/src/pt_decoder_function.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_decoder_function.h"
+#include "pt_packet_decoder.h"
+#include "pt_query_decoder.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+const struct pt_decoder_function pt_decode_unknown = {
+ /* .packet = */ pt_pkt_decode_unknown,
+ /* .decode = */ pt_qry_decode_unknown,
+ /* .header = */ pt_qry_decode_unknown,
+ /* .flags = */ pdff_unknown
+};
+
+const struct pt_decoder_function pt_decode_pad = {
+ /* .packet = */ pt_pkt_decode_pad,
+ /* .decode = */ pt_qry_decode_pad,
+ /* .header = */ pt_qry_decode_pad,
+ /* .flags = */ pdff_pad
+};
+
+const struct pt_decoder_function pt_decode_psb = {
+ /* .packet = */ pt_pkt_decode_psb,
+ /* .decode = */ pt_qry_decode_psb,
+ /* .header = */ NULL,
+ /* .flags = */ 0
+};
+
+const struct pt_decoder_function pt_decode_tip = {
+ /* .packet = */ pt_pkt_decode_tip,
+ /* .decode = */ pt_qry_decode_tip,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_tip
+};
+
+const struct pt_decoder_function pt_decode_tnt_8 = {
+ /* .packet = */ pt_pkt_decode_tnt_8,
+ /* .decode = */ pt_qry_decode_tnt_8,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_tnt
+};
+
+const struct pt_decoder_function pt_decode_tnt_64 = {
+ /* .packet = */ pt_pkt_decode_tnt_64,
+ /* .decode = */ pt_qry_decode_tnt_64,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_tnt
+};
+
+const struct pt_decoder_function pt_decode_tip_pge = {
+ /* .packet = */ pt_pkt_decode_tip_pge,
+ /* .decode = */ pt_qry_decode_tip_pge,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_tip_pgd = {
+ /* .packet = */ pt_pkt_decode_tip_pgd,
+ /* .decode = */ pt_qry_decode_tip_pgd,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_fup = {
+ /* .packet = */ pt_pkt_decode_fup,
+ /* .decode = */ pt_qry_decode_fup,
+ /* .header = */ pt_qry_header_fup,
+ /* .flags = */ pdff_fup
+};
+
+const struct pt_decoder_function pt_decode_pip = {
+ /* .packet = */ pt_pkt_decode_pip,
+ /* .decode = */ pt_qry_decode_pip,
+ /* .header = */ pt_qry_header_pip,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_ovf = {
+ /* .packet = */ pt_pkt_decode_ovf,
+ /* .decode = */ pt_qry_decode_ovf,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_psbend | pdff_event
+};
+
+const struct pt_decoder_function pt_decode_mode = {
+ /* .packet = */ pt_pkt_decode_mode,
+ /* .decode = */ pt_qry_decode_mode,
+ /* .header = */ pt_qry_header_mode,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_psbend = {
+ /* .packet = */ pt_pkt_decode_psbend,
+ /* .decode = */ pt_qry_decode_psbend,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_psbend
+};
+
+const struct pt_decoder_function pt_decode_tsc = {
+ /* .packet = */ pt_pkt_decode_tsc,
+ /* .decode = */ pt_qry_decode_tsc,
+ /* .header = */ pt_qry_header_tsc,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_cbr = {
+ /* .packet = */ pt_pkt_decode_cbr,
+ /* .decode = */ pt_qry_decode_cbr,
+ /* .header = */ pt_qry_header_cbr,
+ /* .flags = */ pdff_timing | pdff_event
+};
+
+const struct pt_decoder_function pt_decode_tma = {
+ /* .packet = */ pt_pkt_decode_tma,
+ /* .decode = */ pt_qry_decode_tma,
+ /* .header = */ pt_qry_decode_tma,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_mtc = {
+ /* .packet = */ pt_pkt_decode_mtc,
+ /* .decode = */ pt_qry_decode_mtc,
+ /* .header = */ pt_qry_decode_mtc,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_cyc = {
+ /* .packet = */ pt_pkt_decode_cyc,
+ /* .decode = */ pt_qry_decode_cyc,
+ /* .header = */ pt_qry_decode_cyc,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_stop = {
+ /* .packet = */ pt_pkt_decode_stop,
+ /* .decode = */ pt_qry_decode_stop,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_vmcs = {
+ /* .packet = */ pt_pkt_decode_vmcs,
+ /* .decode = */ pt_qry_decode_vmcs,
+ /* .header = */ pt_qry_header_vmcs,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_mnt = {
+ /* .packet = */ pt_pkt_decode_mnt,
+ /* .decode = */ pt_qry_decode_mnt,
+ /* .header = */ pt_qry_header_mnt,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_exstop = {
+ /* .packet = */ pt_pkt_decode_exstop,
+ /* .decode = */ pt_qry_decode_exstop,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_mwait = {
+ /* .packet = */ pt_pkt_decode_mwait,
+ /* .decode = */ pt_qry_decode_mwait,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_pwre = {
+ /* .packet = */ pt_pkt_decode_pwre,
+ /* .decode = */ pt_qry_decode_pwre,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_pwrx = {
+ /* .packet = */ pt_pkt_decode_pwrx,
+ /* .decode = */ pt_qry_decode_pwrx,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_ptw = {
+ /* .packet = */ pt_pkt_decode_ptw,
+ /* .decode = */ pt_qry_decode_ptw,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+
+int pt_df_fetch(const struct pt_decoder_function **dfun, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ uint8_t opc, ext, ext2;
+
+ if (!dfun || !config)
+ return -pte_internal;
+
+ /* Clear the decode function in case of errors. */
+ *dfun = NULL;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pos || (pos < begin) || (end < pos))
+ return -pte_nosync;
+
+ if (pos == end)
+ return -pte_eos;
+
+ opc = *pos++;
+ switch (opc) {
+ default:
+ /* Check opcodes that require masking. */
+ if ((opc & pt_opm_tnt_8) == pt_opc_tnt_8) {
+ *dfun = &pt_decode_tnt_8;
+ return 0;
+ }
+
+ if ((opc & pt_opm_cyc) == pt_opc_cyc) {
+ *dfun = &pt_decode_cyc;
+ return 0;
+ }
+
+ if ((opc & pt_opm_tip) == pt_opc_tip) {
+ *dfun = &pt_decode_tip;
+ return 0;
+ }
+
+ if ((opc & pt_opm_fup) == pt_opc_fup) {
+ *dfun = &pt_decode_fup;
+ return 0;
+ }
+
+ if ((opc & pt_opm_tip) == pt_opc_tip_pge) {
+ *dfun = &pt_decode_tip_pge;
+ return 0;
+ }
+
+ if ((opc & pt_opm_tip) == pt_opc_tip_pgd) {
+ *dfun = &pt_decode_tip_pgd;
+ return 0;
+ }
+
+ *dfun = &pt_decode_unknown;
+ return 0;
+
+ case pt_opc_pad:
+ *dfun = &pt_decode_pad;
+ return 0;
+
+ case pt_opc_mode:
+ *dfun = &pt_decode_mode;
+ return 0;
+
+ case pt_opc_tsc:
+ *dfun = &pt_decode_tsc;
+ return 0;
+
+ case pt_opc_mtc:
+ *dfun = &pt_decode_mtc;
+ return 0;
+
+ case pt_opc_ext:
+ if (pos == end)
+ return -pte_eos;
+
+ ext = *pos++;
+ switch (ext) {
+ default:
+ /* Check opcodes that require masking. */
+ if ((ext & pt_opm_ptw) == pt_ext_ptw) {
+ *dfun = &pt_decode_ptw;
+ return 0;
+ }
+
+ *dfun = &pt_decode_unknown;
+ return 0;
+
+ case pt_ext_psb:
+ *dfun = &pt_decode_psb;
+ return 0;
+
+ case pt_ext_ovf:
+ *dfun = &pt_decode_ovf;
+ return 0;
+
+ case pt_ext_tnt_64:
+ *dfun = &pt_decode_tnt_64;
+ return 0;
+
+ case pt_ext_psbend:
+ *dfun = &pt_decode_psbend;
+ return 0;
+
+ case pt_ext_cbr:
+ *dfun = &pt_decode_cbr;
+ return 0;
+
+ case pt_ext_pip:
+ *dfun = &pt_decode_pip;
+ return 0;
+
+ case pt_ext_tma:
+ *dfun = &pt_decode_tma;
+ return 0;
+
+ case pt_ext_stop:
+ *dfun = &pt_decode_stop;
+ return 0;
+
+ case pt_ext_vmcs:
+ *dfun = &pt_decode_vmcs;
+ return 0;
+
+ case pt_ext_exstop:
+ case pt_ext_exstop_ip:
+ *dfun = &pt_decode_exstop;
+ return 0;
+
+ case pt_ext_mwait:
+ *dfun = &pt_decode_mwait;
+ return 0;
+
+ case pt_ext_pwre:
+ *dfun = &pt_decode_pwre;
+ return 0;
+
+ case pt_ext_pwrx:
+ *dfun = &pt_decode_pwrx;
+ return 0;
+
+ case pt_ext_ext2:
+ if (pos == end)
+ return -pte_eos;
+
+ ext2 = *pos++;
+ switch (ext2) {
+ default:
+ *dfun = &pt_decode_unknown;
+ return 0;
+
+ case pt_ext2_mnt:
+ *dfun = &pt_decode_mnt;
+ return 0;
+ }
+ }
+ }
+}
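+/* For example (illustrative only): a PSB packet starts with the byte pair
+ * 0x02 0x82, so fetching at its position consumes the pt_opc_ext byte and
+ * takes the pt_ext_psb branch above, setting *dfun to &pt_decode_psb.  A
+ * short TNT packet, in contrast, is recognized in the default branch purely
+ * by masking its first byte with pt_opm_tnt_8.
+ */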
diff --git a/libipt/src/pt_encoder.c b/libipt/src/pt_encoder.c
new file mode 100644
index 000000000000..946b88cbff7a
--- /dev/null
+++ b/libipt/src/pt_encoder.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_encoder.h"
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+int pt_encoder_init(struct pt_encoder *encoder, const struct pt_config *config)
+{
+ int errcode;
+
+ if (!encoder)
+ return -pte_invalid;
+
+ memset(encoder, 0, sizeof(*encoder));
+
+ errcode = pt_config_from_user(&encoder->config, config);
+ if (errcode < 0)
+ return errcode;
+
+ encoder->pos = encoder->config.begin;
+
+ return 0;
+}
+
+void pt_encoder_fini(struct pt_encoder *encoder)
+{
+ (void) encoder;
+
+ /* Nothing to do. */
+}
+
+struct pt_encoder *pt_alloc_encoder(const struct pt_config *config)
+{
+ struct pt_encoder *encoder;
+ int errcode;
+
+ encoder = malloc(sizeof(*encoder));
+ if (!encoder)
+ return NULL;
+
+ errcode = pt_encoder_init(encoder, config);
+ if (errcode < 0) {
+ free(encoder);
+ return NULL;
+ }
+
+ return encoder;
+}
+
+void pt_free_encoder(struct pt_encoder *encoder)
+{
+ pt_encoder_fini(encoder);
+ free(encoder);
+}
+
+int pt_enc_sync_set(struct pt_encoder *encoder, uint64_t offset)
+{
+ uint8_t *begin, *end, *pos;
+
+ if (!encoder)
+ return -pte_invalid;
+
+ begin = encoder->config.begin;
+ end = encoder->config.end;
+ pos = begin + offset;
+
+ if (end < pos || pos < begin)
+ return -pte_eos;
+
+ encoder->pos = pos;
+ return 0;
+}
+
+int pt_enc_get_offset(const struct pt_encoder *encoder, uint64_t *offset)
+{
+ const uint8_t *raw, *begin;
+
+ if (!encoder || !offset)
+ return -pte_invalid;
+
+ /* The encoder is synchronized at all times. */
+ raw = encoder->pos;
+ if (!raw)
+ return -pte_internal;
+
+ begin = encoder->config.begin;
+ if (!begin)
+ return -pte_internal;
+
+ *offset = raw - begin;
+ return 0;
+}
+
+const struct pt_config *pt_enc_get_config(const struct pt_encoder *encoder)
+{
+ if (!encoder)
+ return NULL;
+
+ return &encoder->config;
+}
+
+/* Check the remaining space.
+ *
+ * Returns zero if there are at least \@size bytes of free space available in
+ * \@encoder's Intel PT buffer.
+ *
+ * Returns -pte_eos if not enough space is available.
+ * Returns -pte_internal if \@encoder is NULL.
+ * Returns -pte_internal if \@encoder is not synchronized.
+ */
+static int pt_reserve(const struct pt_encoder *encoder, unsigned int size)
+{
+ const uint8_t *begin, *end, *pos;
+
+ if (!encoder)
+ return -pte_internal;
+
+ /* The encoder is synchronized at all times. */
+ pos = encoder->pos;
+ if (!pos)
+ return -pte_internal;
+
+ begin = encoder->config.begin;
+ end = encoder->config.end;
+
+ pos += size;
+ if (pos < begin || end < pos)
+ return -pte_eos;
+
+ return 0;
+}
+
+/* Return the size of an IP payload based on its IP compression.
+ *
+ * Returns -pte_invalid if \@ipc is not a valid IP compression.
+ */
+static int pt_ipc_size(enum pt_ip_compression ipc)
+{
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ return 0;
+
+ case pt_ipc_update_16:
+ return pt_pl_ip_upd16_size;
+
+ case pt_ipc_update_32:
+ return pt_pl_ip_upd32_size;
+
+ case pt_ipc_update_48:
+ return pt_pl_ip_upd48_size;
+
+ case pt_ipc_sext_48:
+ return pt_pl_ip_sext48_size;
+
+ case pt_ipc_full:
+ return pt_pl_ip_full_size;
+ }
+
+ return -pte_invalid;
+}
+
+/* Encode an integer value.
+ *
+ * Writes the \@size least significant bytes of \@value starting from \@pos.
+ *
+ * The caller needs to ensure that there is enough space available.
+ *
+ * Returns the updated position.
+ */
+static uint8_t *pt_encode_int(uint8_t *pos, uint64_t val, int size)
+{
+ for (; size; --size, val >>= 8)
+ *pos++ = (uint8_t) val;
+
+ return pos;
+}
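+/* For example (illustrative only): pt_encode_int(pos, 0x1234, 2) writes the
+ * bytes 0x34 0x12, i.e. values are emitted least significant byte first as
+ * required for Intel PT payloads.  With a size of zero, nothing is written
+ * and \@pos is returned unchanged.
+ */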
+
+/* Encode an IP packet.
+ *
+ * Write an IP packet with opcode \@op and payload from \@packet if there is
+ * enough space in \@encoder's Intel PT buffer.
+ *
+ * Returns the number of bytes written on success.
+ *
+ * Returns -pte_eos if there is not enough space.
+ * Returns -pte_internal if \@encoder or \@packet is NULL.
+ * Returns -pte_invalid if \@packet.ipc is not valid.
+ */
+static int pt_encode_ip(struct pt_encoder *encoder, enum pt_opcode op,
+ const struct pt_packet_ip *packet)
+{
+ uint8_t *pos;
+ uint8_t opc, ipc;
+ int size, errcode;
+
+ if (!encoder || !packet)
+ return -pte_internal;
+
+ size = pt_ipc_size(packet->ipc);
+ if (size < 0)
+ return size;
+
+ errcode = pt_reserve(encoder, /* opc size = */ 1 + size);
+ if (errcode < 0)
+ return errcode;
+
+ /* We already checked the ipc in pt_ipc_size(). */
+ ipc = (uint8_t) (packet->ipc << pt_opm_ipc_shr);
+ opc = (uint8_t) op;
+
+ pos = encoder->pos;
+ *pos++ = opc | ipc;
+
+ encoder->pos = pt_encode_int(pos, packet->ip, size);
+ return /* opc size = */ 1 + size;
+}
+
+int pt_enc_next(struct pt_encoder *encoder, const struct pt_packet *packet)
+{
+ uint8_t *pos, *begin;
+ int errcode;
+
+ if (!encoder || !packet)
+ return -pte_invalid;
+
+ pos = begin = encoder->pos;
+ switch (packet->type) {
+ case ppt_pad:
+ errcode = pt_reserve(encoder, ptps_pad);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_pad;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_psb: {
+ uint64_t psb;
+
+ errcode = pt_reserve(encoder, ptps_psb);
+ if (errcode < 0)
+ return errcode;
+
+ psb = ((uint64_t) pt_psb_hilo << 48 |
+ (uint64_t) pt_psb_hilo << 32 |
+ (uint64_t) pt_psb_hilo << 16 |
+ (uint64_t) pt_psb_hilo);
+
+ pos = pt_encode_int(pos, psb, 8);
+ pos = pt_encode_int(pos, psb, 8);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_psbend:
+ errcode = pt_reserve(encoder, ptps_psbend);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_psbend;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_ovf:
+ errcode = pt_reserve(encoder, ptps_ovf);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_ovf;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_fup:
+ return pt_encode_ip(encoder, pt_opc_fup, &packet->payload.ip);
+
+ case ppt_tip:
+ return pt_encode_ip(encoder, pt_opc_tip, &packet->payload.ip);
+
+ case ppt_tip_pge:
+ return pt_encode_ip(encoder, pt_opc_tip_pge,
+ &packet->payload.ip);
+
+ case ppt_tip_pgd:
+ return pt_encode_ip(encoder, pt_opc_tip_pgd,
+ &packet->payload.ip);
+
+ case ppt_tnt_8: {
+ uint8_t opc, stop;
+
+ if (packet->payload.tnt.bit_size >= 7)
+ return -pte_bad_packet;
+
+ errcode = pt_reserve(encoder, ptps_tnt_8);
+ if (errcode < 0)
+ return errcode;
+
+ stop = packet->payload.tnt.bit_size + pt_opm_tnt_8_shr;
+ opc = (uint8_t)
+ (packet->payload.tnt.payload << pt_opm_tnt_8_shr);
+
+ *pos++ = (uint8_t) (opc | (1u << stop));
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_tnt_64: {
+ uint64_t tnt, stop;
+
+ errcode = pt_reserve(encoder, ptps_tnt_64);
+ if (errcode < 0)
+ return errcode;
+
+ if (packet->payload.tnt.bit_size >= pt_pl_tnt_64_bits)
+ return -pte_invalid;
+
+ stop = 1ull << packet->payload.tnt.bit_size;
+ tnt = packet->payload.tnt.payload;
+
+ if (tnt & ~(stop - 1))
+ return -pte_invalid;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_tnt_64;
+ pos = pt_encode_int(pos, tnt | stop, pt_pl_tnt_64_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_mode: {
+ uint8_t mode;
+
+ errcode = pt_reserve(encoder, ptps_mode);
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet->payload.mode.leaf) {
+ default:
+ return -pte_bad_packet;
+
+ case pt_mol_exec:
+ mode = pt_mol_exec;
+
+ if (packet->payload.mode.bits.exec.csl)
+ mode |= pt_mob_exec_csl;
+
+ if (packet->payload.mode.bits.exec.csd)
+ mode |= pt_mob_exec_csd;
+ break;
+
+ case pt_mol_tsx:
+ mode = pt_mol_tsx;
+
+ if (packet->payload.mode.bits.tsx.intx)
+ mode |= pt_mob_tsx_intx;
+
+ if (packet->payload.mode.bits.tsx.abrt)
+ mode |= pt_mob_tsx_abrt;
+ break;
+ }
+
+ *pos++ = pt_opc_mode;
+ *pos++ = mode;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_pip: {
+ uint64_t cr3;
+
+ errcode = pt_reserve(encoder, ptps_pip);
+ if (errcode < 0)
+ return errcode;
+
+ cr3 = packet->payload.pip.cr3;
+ cr3 >>= pt_pl_pip_shl;
+ cr3 <<= pt_pl_pip_shr;
+
+ if (packet->payload.pip.nr)
+ cr3 |= (uint64_t) pt_pl_pip_nr;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_pip;
+ pos = pt_encode_int(pos, cr3, pt_pl_pip_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_tsc:
+ errcode = pt_reserve(encoder, ptps_tsc);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_tsc;
+ pos = pt_encode_int(pos, packet->payload.tsc.tsc,
+ pt_pl_tsc_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_cbr:
+ errcode = pt_reserve(encoder, ptps_cbr);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_cbr;
+ *pos++ = packet->payload.cbr.ratio;
+ *pos++ = 0;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_tma: {
+ uint16_t ctc, fc;
+
+ errcode = pt_reserve(encoder, ptps_tma);
+ if (errcode < 0)
+ return errcode;
+
+ ctc = packet->payload.tma.ctc;
+ fc = packet->payload.tma.fc;
+
+ if (fc & ~pt_pl_tma_fc_mask)
+ return -pte_bad_packet;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_tma;
+ pos = pt_encode_int(pos, ctc, pt_pl_tma_ctc_size);
+ *pos++ = 0;
+ pos = pt_encode_int(pos, fc, pt_pl_tma_fc_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_mtc:
+ errcode = pt_reserve(encoder, ptps_mtc);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_mtc;
+ *pos++ = packet->payload.mtc.ctc;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_cyc: {
+ uint8_t byte[pt_pl_cyc_max_size], index, end;
+ uint64_t ctc;
+
+ ctc = (uint8_t) packet->payload.cyc.value;
+ ctc <<= pt_opm_cyc_shr;
+
+ byte[0] = pt_opc_cyc;
+ byte[0] |= (uint8_t) ctc;
+
+ ctc = packet->payload.cyc.value;
+ ctc >>= (8 - pt_opm_cyc_shr);
+ if (ctc)
+ byte[0] |= pt_opm_cyc_ext;
+
+ for (end = 1; ctc; ++end) {
+ /* Check if the CYC payload is too big. */
+ if (pt_pl_cyc_max_size <= end)
+ return -pte_bad_packet;
+
+ ctc <<= pt_opm_cycx_shr;
+
+ byte[end] = (uint8_t) ctc;
+
+ ctc >>= 8;
+ if (ctc)
+ byte[end] |= pt_opm_cycx_ext;
+ }
+
+ errcode = pt_reserve(encoder, end);
+ if (errcode < 0)
+ return errcode;
+
+ for (index = 0; index < end; ++index)
+ *pos++ = byte[index];
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_stop:
+ errcode = pt_reserve(encoder, ptps_stop);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_stop;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_vmcs:
+ errcode = pt_reserve(encoder, ptps_vmcs);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_vmcs;
+ pos = pt_encode_int(pos,
+ packet->payload.vmcs.base >> pt_pl_vmcs_shl,
+ pt_pl_vmcs_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_mnt:
+ errcode = pt_reserve(encoder, ptps_mnt);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_ext2;
+ *pos++ = pt_ext2_mnt;
+ pos = pt_encode_int(pos, packet->payload.mnt.payload,
+ pt_pl_mnt_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_exstop: {
+ uint8_t ext;
+
+ errcode = pt_reserve(encoder, ptps_exstop);
+ if (errcode < 0)
+ return errcode;
+
+ ext = packet->payload.exstop.ip ?
+ pt_ext_exstop_ip : pt_ext_exstop;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = ext;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_mwait:
+ errcode = pt_reserve(encoder, ptps_mwait);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_mwait;
+ pos = pt_encode_int(pos, packet->payload.mwait.hints,
+ pt_pl_mwait_hints_size);
+ pos = pt_encode_int(pos, packet->payload.mwait.ext,
+ pt_pl_mwait_ext_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_pwre: {
+ uint64_t payload;
+
+ errcode = pt_reserve(encoder, ptps_pwre);
+ if (errcode < 0)
+ return errcode;
+
+ payload = 0ull;
+ payload |= ((uint64_t) packet->payload.pwre.state <<
+ pt_pl_pwre_state_shr) &
+ (uint64_t) pt_pl_pwre_state_mask;
+ payload |= ((uint64_t) packet->payload.pwre.sub_state <<
+ pt_pl_pwre_sub_state_shr) &
+ (uint64_t) pt_pl_pwre_sub_state_mask;
+
+ if (packet->payload.pwre.hw)
+ payload |= (uint64_t) pt_pl_pwre_hw_mask;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_pwre;
+ pos = pt_encode_int(pos, payload, pt_pl_pwre_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_pwrx: {
+ uint64_t payload;
+
+ errcode = pt_reserve(encoder, ptps_pwrx);
+ if (errcode < 0)
+ return errcode;
+
+ payload = 0ull;
+ payload |= ((uint64_t) packet->payload.pwrx.last <<
+ pt_pl_pwrx_last_shr) &
+ (uint64_t) pt_pl_pwrx_last_mask;
+ payload |= ((uint64_t) packet->payload.pwrx.deepest <<
+ pt_pl_pwrx_deepest_shr) &
+ (uint64_t) pt_pl_pwrx_deepest_mask;
+
+ if (packet->payload.pwrx.interrupt)
+ payload |= (uint64_t) pt_pl_pwrx_wr_int;
+ if (packet->payload.pwrx.store)
+ payload |= (uint64_t) pt_pl_pwrx_wr_store;
+ if (packet->payload.pwrx.autonomous)
+ payload |= (uint64_t) pt_pl_pwrx_wr_hw;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_pwrx;
+ pos = pt_encode_int(pos, payload, pt_pl_pwrx_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_ptw: {
+ uint8_t plc, ext;
+ int size;
+
+ plc = packet->payload.ptw.plc;
+
+ size = pt_ptw_size(plc);
+ if (size < 0)
+ return size;
+
+ errcode = pt_reserve(encoder, pt_opcs_ptw + size);
+ if (errcode < 0)
+ return errcode;
+
+ ext = pt_ext_ptw;
+ ext |= plc << pt_opm_ptw_pb_shr;
+
+ if (packet->payload.ptw.ip)
+ ext |= (uint8_t) pt_opm_ptw_ip;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = ext;
+ pos = pt_encode_int(pos, packet->payload.ptw.payload, size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_unknown:
+ case ppt_invalid:
+ return -pte_bad_opc;
+ }
+
+ return -pte_bad_opc;
+}
+
+int pt_encode_pad(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_pad;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_psb(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_psb;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_psbend(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_psbend;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tip(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tip;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tnt_8(struct pt_encoder *encoder, uint8_t tnt, int size)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tnt_8;
+ packet.payload.tnt.bit_size = (uint8_t) size;
+ packet.payload.tnt.payload = tnt;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tnt_64(struct pt_encoder *encoder, uint64_t tnt, int size)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tnt_64;
+ packet.payload.tnt.bit_size = (uint8_t) size;
+ packet.payload.tnt.payload = tnt;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tip_pge(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tip_pge;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tip_pgd(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tip_pgd;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_fup(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_fup;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_pip(struct pt_encoder *encoder, uint64_t cr3, uint8_t flags)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_pip;
+ packet.payload.pip.cr3 = cr3;
+ packet.payload.pip.nr = (flags & pt_pl_pip_nr) != 0;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_ovf(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_ovf;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mode_exec(struct pt_encoder *encoder, enum pt_exec_mode mode)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_exec;
+ packet.payload.mode.bits.exec = pt_set_exec_mode(mode);
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mode_tsx(struct pt_encoder *encoder, uint8_t bits)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_tsx;
+
+ if (bits & pt_mob_tsx_intx)
+ packet.payload.mode.bits.tsx.intx = 1;
+ else
+ packet.payload.mode.bits.tsx.intx = 0;
+
+ if (bits & pt_mob_tsx_abrt)
+ packet.payload.mode.bits.tsx.abrt = 1;
+ else
+ packet.payload.mode.bits.tsx.abrt = 0;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tsc(struct pt_encoder *encoder, uint64_t tsc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tsc;
+ packet.payload.tsc.tsc = tsc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_cbr(struct pt_encoder *encoder, uint8_t cbr)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_cbr;
+ packet.payload.cbr.ratio = cbr;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tma(struct pt_encoder *encoder, uint16_t ctc, uint16_t fc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tma;
+ packet.payload.tma.ctc = ctc;
+ packet.payload.tma.fc = fc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mtc(struct pt_encoder *encoder, uint8_t ctc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mtc;
+ packet.payload.mtc.ctc = ctc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_cyc(struct pt_encoder *encoder, uint32_t ctc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_cyc;
+ packet.payload.cyc.value = ctc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_stop(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_stop;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_vmcs(struct pt_encoder *encoder, uint64_t payload)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_vmcs;
+ packet.payload.vmcs.base = payload;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mnt(struct pt_encoder *encoder, uint64_t payload)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mnt;
+ packet.payload.mnt.payload = payload;
+
+ return pt_enc_next(encoder, &packet);
+}
diff --git a/libipt/src/pt_error.c b/libipt/src/pt_error.c
new file mode 100644
index 000000000000..c3ee81cfdba1
--- /dev/null
+++ b/libipt/src/pt_error.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "intel-pt.h"
+
+
+const char *pt_errstr(enum pt_error_code errcode)
+{
+ switch (errcode) {
+ case pte_ok:
+ return "OK";
+
+ case pte_internal:
+ return "internal error";
+
+ case pte_invalid:
+ return "invalid argument";
+
+ case pte_nosync:
+ return "decoder out of sync";
+
+ case pte_bad_opc:
+ return "unknown opcode";
+
+ case pte_bad_packet:
+ return "unknown packet";
+
+ case pte_bad_context:
+ return "unexpected packet context";
+
+ case pte_eos:
+ return "reached end of trace stream";
+
+ case pte_bad_query:
+ return "trace stream does not match query";
+
+ case pte_nomem:
+ return "out of memory";
+
+ case pte_bad_config:
+ return "bad configuration";
+
+ case pte_noip:
+ return "no ip";
+
+ case pte_ip_suppressed:
+ return "ip has been suppressed";
+
+ case pte_nomap:
+ return "no memory mapped at this address";
+
+ case pte_bad_insn:
+ return "unknown instruction";
+
+ case pte_no_time:
+ return "no timing information";
+
+ case pte_no_cbr:
+ return "no core:bus ratio";
+
+ case pte_bad_image:
+ return "bad image";
+
+ case pte_bad_lock:
+ return "locking error";
+
+ case pte_not_supported:
+ return "not supported";
+
+ case pte_retstack_empty:
+ return "compressed return without call";
+
+ case pte_bad_retcomp:
+ return "bad compressed return";
+
+ case pte_bad_status_update:
+ return "bad status update";
+
+ case pte_no_enable:
+ return "expected tracing enabled event";
+
+ case pte_event_ignored:
+ return "event ignored";
+
+ case pte_overflow:
+ return "overflow";
+
+ case pte_bad_file:
+ return "bad file";
+
+ case pte_bad_cpu:
+ return "unknown cpu";
+ }
+
+ /* Should not reach here. */
+ return "internal error.";
+}
diff --git a/libipt/src/pt_event_queue.c b/libipt/src/pt_event_queue.c
new file mode 100644
index 000000000000..89518ea3e041
--- /dev/null
+++ b/libipt/src/pt_event_queue.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_event_queue.h"
+
+#include <string.h>
+
+
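+/* Advance a circular event queue index, wrapping around at evq_max. */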
+static inline uint8_t pt_evq_inc(uint8_t idx)
+{
+ idx += 1;
+ idx %= evq_max;
+
+ return idx;
+}
+
+static struct pt_event *pt_event_init(struct pt_event *event)
+{
+ if (event)
+ memset(event, 0, sizeof(*event));
+
+ return event;
+}
+
+void pt_evq_init(struct pt_event_queue *evq)
+{
+ if (!evq)
+ return;
+
+ memset(evq, 0, sizeof(*evq));
+}
+
+struct pt_event *pt_evq_standalone(struct pt_event_queue *evq)
+{
+ if (!evq)
+ return NULL;
+
+ return pt_event_init(&evq->standalone);
+}
+
+struct pt_event *pt_evq_enqueue(struct pt_event_queue *evq,
+ enum pt_event_binding evb)
+{
+ uint8_t begin, end, gap, idx;
+
+ if (!evq)
+ return NULL;
+
+ if (evb_max <= evb)
+ return NULL;
+
+ begin = evq->begin[evb];
+ idx = evq->end[evb];
+
+ if (evq_max <= begin)
+ return NULL;
+
+ if (evq_max <= idx)
+ return NULL;
+
+ end = pt_evq_inc(idx);
+ gap = pt_evq_inc(end);
+
+ /* Leave a gap so we don't overwrite the last dequeued event. */
+ if (begin == gap)
+ return NULL;
+
+ evq->end[evb] = end;
+
+ return pt_event_init(&evq->queue[evb][idx]);
+}
+
+struct pt_event *pt_evq_dequeue(struct pt_event_queue *evq,
+ enum pt_event_binding evb)
+{
+ uint8_t begin, end;
+
+ if (!evq)
+ return NULL;
+
+ if (evb_max <= evb)
+ return NULL;
+
+ begin = evq->begin[evb];
+ end = evq->end[evb];
+
+ if (evq_max <= begin)
+ return NULL;
+
+ if (evq_max <= end)
+ return NULL;
+
+ if (begin == end)
+ return NULL;
+
+ evq->begin[evb] = pt_evq_inc(begin);
+
+ return &evq->queue[evb][begin];
+}
+
+int pt_evq_clear(struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+ if (!evq)
+ return -pte_internal;
+
+ if (evb_max <= evb)
+ return -pte_internal;
+
+ evq->begin[evb] = 0;
+ evq->end[evb] = 0;
+
+ return 0;
+}
+
+int pt_evq_empty(const struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+ uint8_t begin, end;
+
+ if (!evq)
+ return -pte_internal;
+
+ if (evb_max <= evb)
+ return -pte_internal;
+
+ begin = evq->begin[evb];
+ end = evq->end[evb];
+
+ if (evq_max <= begin)
+ return -pte_internal;
+
+ if (evq_max <= end)
+ return -pte_internal;
+
+ return begin == end;
+}
+
+int pt_evq_pending(const struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_empty(evq, evb);
+ if (errcode < 0)
+ return errcode;
+
+ return !errcode;
+}
+
+struct pt_event *pt_evq_find(struct pt_event_queue *evq,
+ enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ uint8_t begin, end;
+
+ if (!evq)
+ return NULL;
+
+ if (evb_max <= evb)
+ return NULL;
+
+ begin = evq->begin[evb];
+ end = evq->end[evb];
+
+ if (evq_max <= begin)
+ return NULL;
+
+ if (evq_max <= end)
+ return NULL;
+
+ for (; begin != end; begin = pt_evq_inc(begin)) {
+ struct pt_event *ev;
+
+ ev = &evq->queue[evb][begin];
+ if (ev->type == evt)
+ return ev;
+ }
+
+ return NULL;
+}
diff --git a/libipt/src/pt_ild.c b/libipt/src/pt_ild.c
new file mode 100644
index 000000000000..a8d78d4102e4
--- /dev/null
+++ b/libipt/src/pt_ild.c
@@ -0,0 +1,1223 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_ild.h"
+#include "pti-imm-defs.h"
+#include "pti-imm.h"
+#include "pti-modrm-defs.h"
+#include "pti-modrm.h"
+#include "pti-disp-defs.h"
+#include "pti-disp.h"
+
+#include <string.h>
+
+/* SET UP 3 TABLES */
+
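+/* Displacement width in bytes, indexed by [eamode][modrm.mod][modrm.rm]. */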
+static uint8_t has_disp_regular[4][4][8];
+
+static void init_has_disp_regular_table(void)
+{
+ uint8_t mod, rm;
+
+ memset(has_disp_regular, 0, sizeof(has_disp_regular));
+
+ /* Fill the eamode16 entries. */
+ has_disp_regular[ptem_16bit][0][6] = 2;
+ for (rm = 0; rm < 8; rm++)
+ for (mod = 1; mod <= 2; mod++)
+ has_disp_regular[ptem_16bit][mod][rm] = mod;
+
+ /* Fill the eamode32/64 entries. */
+ has_disp_regular[ptem_32bit][0][5] = 4;
+ has_disp_regular[ptem_64bit][0][5] = 4;
+ for (rm = 0; rm < 8; rm++) {
+ has_disp_regular[ptem_32bit][1][rm] = 1;
+ has_disp_regular[ptem_32bit][2][rm] = 4;
+
+ has_disp_regular[ptem_64bit][1][rm] = 1;
+ has_disp_regular[ptem_64bit][2][rm] = 4;
+ }
+}
+
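+/* Effective address mode, indexed by [address-size prefix][execution mode]. */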
+static uint8_t eamode_table[2][4];
+
+static void init_eamode_table(void)
+{
+ eamode_table[0][ptem_unknown] = ptem_unknown;
+ eamode_table[0][ptem_16bit] = ptem_16bit;
+ eamode_table[0][ptem_32bit] = ptem_32bit;
+ eamode_table[0][ptem_64bit] = ptem_64bit;
+
+ eamode_table[1][ptem_unknown] = ptem_unknown;
+ eamode_table[1][ptem_16bit] = ptem_32bit;
+ eamode_table[1][ptem_32bit] = ptem_16bit;
+ eamode_table[1][ptem_64bit] = ptem_32bit;
+}
+
+static uint8_t has_sib_table[4][4][8];
+
+static void init_has_sib_table(void)
+{
+ uint8_t mod;
+
+ memset(has_sib_table, 0, sizeof(has_sib_table));
+
+ /* For eamode32/64 there is a SIB byte for mod != 3 and rm == 4. */
+ for (mod = 0; mod <= 2; mod++) {
+ has_sib_table[ptem_32bit][mod][4] = 1;
+ has_sib_table[ptem_64bit][mod][4] = 1;
+ }
+}
+
+/* SOME ACCESSORS */
+
+static inline uint8_t get_byte(const struct pt_ild *ild, uint8_t i)
+{
+ return ild->itext[i];
+}
+
+static inline uint8_t const *get_byte_ptr(const struct pt_ild *ild, uint8_t i)
+{
+ return ild->itext + i;
+}
+
+static inline int mode_64b(const struct pt_ild *ild)
+{
+ return ild->mode == ptem_64bit;
+}
+
+static inline int mode_32b(const struct pt_ild *ild)
+{
+ return ild->mode == ptem_32bit;
+}
+
+static inline int bits_match(uint8_t x, uint8_t mask, uint8_t target)
+{
+ return (x & mask) == target;
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_eosz_non64(const struct pt_ild *ild)
+{
+ if (mode_32b(ild)) {
+ if (ild->u.s.osz)
+ return ptem_16bit;
+ return ptem_32bit;
+ }
+ if (ild->u.s.osz)
+ return ptem_32bit;
+ return ptem_16bit;
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_eosz(const struct pt_ild *ild)
+{
+ if (mode_64b(ild)) {
+ if (ild->u.s.rex_w)
+ return ptem_64bit;
+ if (ild->u.s.osz)
+ return ptem_16bit;
+ return ptem_32bit;
+ }
+ return pti_get_nominal_eosz_non64(ild);
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_eosz_df64(const struct pt_ild *ild)
+{
+ if (mode_64b(ild)) {
+ if (ild->u.s.rex_w)
+ return ptem_64bit;
+ if (ild->u.s.osz)
+ return ptem_16bit;
+ /* Only the next line differs from pti_get_nominal_eosz(), above. */
+ return ptem_64bit;
+ }
+ return pti_get_nominal_eosz_non64(ild);
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_easz_non64(const struct pt_ild *ild)
+{
+ if (mode_32b(ild)) {
+ if (ild->u.s.asz)
+ return ptem_16bit;
+ return ptem_32bit;
+ }
+ if (ild->u.s.asz)
+ return ptem_32bit;
+ return ptem_16bit;
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_easz(const struct pt_ild *ild)
+{
+ if (mode_64b(ild)) {
+ if (ild->u.s.asz)
+ return ptem_32bit;
+ return ptem_64bit;
+ }
+ return pti_get_nominal_easz_non64(ild);
+}
+
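+/* Width of a z-sized operand: 2 bytes for 16-bit and 4 bytes for 32-bit and
+ * 64-bit operand sizes.
+ */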
+static inline int resolve_z(uint8_t *pbytes, enum pt_exec_mode eosz)
+{
+ static const uint8_t bytes[] = { 2, 4, 4 };
+ unsigned int idx;
+
+ if (!pbytes)
+ return -pte_internal;
+
+ idx = (unsigned int) eosz - 1;
+ if (sizeof(bytes) <= idx)
+ return -pte_bad_insn;
+
+ *pbytes = bytes[idx];
+ return 0;
+}
+
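+/* Width of a v-sized operand: 2, 4, or 8 bytes for 16-bit, 32-bit, or 64-bit
+ * operand sizes.
+ */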
+static inline int resolve_v(uint8_t *pbytes, enum pt_exec_mode eosz)
+{
+ static const uint8_t bytes[] = { 2, 4, 8 };
+ unsigned int idx;
+
+ if (!pbytes)
+ return -pte_internal;
+
+ idx = (unsigned int) eosz - 1;
+ if (sizeof(bytes) <= idx)
+ return -pte_bad_insn;
+
+ *pbytes = bytes[idx];
+ return 0;
+}
+
+/* DECODERS */
+
+static int set_imm_bytes(struct pt_ild *ild)
+{
+ /* Set ild->imm1_bytes and ild->imm2_bytes for maps 0/1. */
+ static uint8_t const *const map_map[] = {
+ /* map 0 */ imm_bytes_map_0x0,
+ /* map 1 */ imm_bytes_map_0x0F
+ };
+ uint8_t map, imm_code;
+
+ if (!ild)
+ return -pte_internal;
+
+ map = ild->map;
+
+ if ((sizeof(map_map) / sizeof(*map_map)) <= map)
+ return 0;
+
+ imm_code = map_map[map][ild->nominal_opcode];
+ switch (imm_code) {
+ case PTI_IMM_NONE:
+ case PTI_0_IMM_WIDTH_CONST_l2:
+ default:
+ return 0;
+
+ case PTI_UIMM8_IMM_WIDTH_CONST_l2:
+ ild->imm1_bytes = 1;
+ return 0;
+
+ case PTI_SIMM8_IMM_WIDTH_CONST_l2:
+ ild->imm1_bytes = 1;
+ return 0;
+
+ case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
+ /* SIMMz(eosz) */
+ return resolve_z(&ild->imm1_bytes, pti_get_nominal_eosz(ild));
+
+ case PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
+ /* UIMMv(eosz) */
+ return resolve_v(&ild->imm1_bytes, pti_get_nominal_eosz(ild));
+
+ case PTI_UIMM16_IMM_WIDTH_CONST_l2:
+ ild->imm1_bytes = 2;
+ return 0;
+
+ case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2:
+ /* push defaults to eosz64 in 64b mode, then uses SIMMz */
+ return resolve_z(&ild->imm1_bytes,
+ pti_get_nominal_eosz_df64(ild));
+
+ case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1:
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2) {
+ return resolve_z(&ild->imm1_bytes,
+ pti_get_nominal_eosz(ild));
+ }
+ return 0;
+
+ case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1:
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 0) {
+ return resolve_z(&ild->imm1_bytes,
+ pti_get_nominal_eosz(ild));
+ }
+ return 0;
+
+ case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1:
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2)
+ ild->imm1_bytes = 1;
+
+ return 0;
+
+ case PTI_IMM_hasimm_map0x0_op0xc8_l1:
+ if (ild->map == PTI_MAP_0) {
+ /* ENTER -> imm1 = 2, imm2 = 1. */
+ ild->imm1_bytes = 2;
+ ild->imm2_bytes = 1;
+ }
+ return 0;
+
+ case PTI_IMM_hasimm_map0x0F_op0x78_l1:
+ /* AMD SSE4a (insertq/extrq use osz/f2) vs vmread
+ * (no prefixes)
+ */
+ if (ild->map == PTI_MAP_1) {
+ if (ild->u.s.osz || ild->u.s.last_f2f3 == 2) {
+ ild->imm1_bytes = 1;
+ ild->imm2_bytes = 1;
+ }
+ }
+ return 0;
+ }
+}
+
+static int imm_dec(struct pt_ild *ild, uint8_t length)
+{
+ int errcode;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->map == PTI_MAP_AMD3DNOW) {
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ ild->nominal_opcode = get_byte(ild, length);
+ return length + 1;
+ }
+
+ errcode = set_imm_bytes(ild);
+ if (errcode < 0)
+ return errcode;
+
+ length += ild->imm1_bytes;
+ length += ild->imm2_bytes;
+ if (ild->max_bytes < length)
+ return -pte_bad_insn;
+
+ return length;
+}
+
+static int compute_disp_dec(struct pt_ild *ild)
+{
+ /* set ild->disp_bytes for maps 0 and 1. */
+ static uint8_t const *const map_map[] = {
+ /* map 0 */ disp_bytes_map_0x0,
+ /* map 1 */ disp_bytes_map_0x0F
+ };
+ uint8_t map, disp_kind;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (0 < ild->disp_bytes)
+ return 0;
+
+ map = ild->map;
+
+ if ((sizeof(map_map) / sizeof(*map_map)) <= map)
+ return 0;
+
+ disp_kind = map_map[map][ild->nominal_opcode];
+ switch (disp_kind) {
+ case PTI_DISP_NONE:
+ ild->disp_bytes = 0;
+ return 0;
+
+ case PTI_PRESERVE_DEFAULT:
+ /* nothing to do */
+ return 0;
+
+ case PTI_BRDISP8:
+ ild->disp_bytes = 1;
+ return 0;
+
+ case PTI_DISP_BUCKET_0_l1:
+ /* BRDISPz(eosz) for 16/32 modes, and BRDISP32 for 64b mode */
+ if (mode_64b(ild)) {
+ ild->disp_bytes = 4;
+ return 0;
+ }
+
+ return resolve_z(&ild->disp_bytes,
+ pti_get_nominal_eosz(ild));
+
+ case PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2:
+ /* MEMDISPv(easz) */
+ return resolve_v(&ild->disp_bytes, pti_get_nominal_easz(ild));
+
+ case PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2:
+ /* BRDISPz(eosz) for 16/32/64 modes */
+ return resolve_z(&ild->disp_bytes, pti_get_nominal_eosz(ild));
+
+ case PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1:
+ /* reg=0 -> preserve, reg=7 -> BRDISPz(eosz) */
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 7) {
+ return resolve_z(&ild->disp_bytes,
+ pti_get_nominal_eosz(ild));
+ }
+ return 0;
+
+ default:
+ return -pte_bad_insn;
+ }
+}
+
+static int disp_dec(struct pt_ild *ild, uint8_t length)
+{
+ uint8_t disp_bytes;
+ int errcode;
+
+ if (!ild)
+ return -pte_internal;
+
+ errcode = compute_disp_dec(ild);
+ if (errcode < 0)
+ return errcode;
+
+ disp_bytes = ild->disp_bytes;
+ if (disp_bytes == 0)
+ return imm_dec(ild, length);
+
+ if (length + disp_bytes > ild->max_bytes)
+ return -pte_bad_insn;
+
+ /* Record only the position; we must be able to re-read the itext
+ * bytes for the actual value (SMC/CMC issue).
+ */
+ ild->disp_pos = length;
+
+ return imm_dec(ild, length + disp_bytes);
+}
+
+static int sib_dec(struct pt_ild *ild, uint8_t length)
+{
+ uint8_t sib;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ sib = get_byte(ild, length);
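+ /* A SIB base of 101b with ModRM.mod 00b implies a 32-bit displacement. */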
+ if ((sib & 0x07) == 0x05 && pti_get_modrm_mod(ild) == 0)
+ ild->disp_bytes = 4;
+
+ return disp_dec(ild, length + 1);
+}
+
+static int modrm_dec(struct pt_ild *ild, uint8_t length)
+{
+ static uint8_t const *const has_modrm_2d[2] = {
+ has_modrm_map_0x0,
+ has_modrm_map_0x0F
+ };
+ int has_modrm = PTI_MODRM_FALSE;
+ pti_map_enum_t map;
+
+ if (!ild)
+ return -pte_internal;
+
+ map = pti_get_map(ild);
+ if (map >= PTI_MAP_2)
+ has_modrm = PTI_MODRM_TRUE;
+ else
+ has_modrm = has_modrm_2d[map][ild->nominal_opcode];
+
+ if (has_modrm == PTI_MODRM_FALSE || has_modrm == PTI_MODRM_UNDEF)
+ return disp_dec(ild, length);
+
+ /* really >= here because we have not eaten the byte yet */
+ if (length >= ild->max_bytes)
+ return -pte_bad_insn;
+
+ ild->modrm_byte = get_byte(ild, length);
+
+ if (has_modrm != PTI_MODRM_IGNORE_MOD) {
+ /* set disp_bytes and sib using simple tables */
+
+ uint8_t eamode = eamode_table[ild->u.s.asz][ild->mode];
+ uint8_t mod = (uint8_t) pti_get_modrm_mod(ild);
+ uint8_t rm = (uint8_t) pti_get_modrm_rm(ild);
+ uint8_t has_sib;
+
+ ild->disp_bytes = has_disp_regular[eamode][mod][rm];
+
+ has_sib = has_sib_table[eamode][mod][rm];
+ if (has_sib)
+ return sib_dec(ild, length + 1);
+ }
+
+ return disp_dec(ild, length + 1);
+}
+
+static inline int get_next_as_opcode(struct pt_ild *ild, uint8_t length)
+{
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ ild->nominal_opcode = get_byte(ild, length);
+
+ return modrm_dec(ild, length + 1);
+}
+
+static int opcode_dec(struct pt_ild *ild, uint8_t length)
+{
+ uint8_t b, m;
+
+ if (!ild)
+ return -pte_internal;
+
+ /* No need to check max_bytes - it was checked by previous scanners. */
+ b = get_byte(ild, length);
+ if (b != 0x0F) { /* 1B opcodes, map 0 */
+ ild->map = PTI_MAP_0;
+ ild->nominal_opcode = b;
+
+ return modrm_dec(ild, length + 1);
+ }
+
+ length++; /* eat the 0x0F */
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ /* 0x0F opcodes MAPS 1,2,3 */
+ m = get_byte(ild, length);
+ if (m == 0x38) {
+ ild->map = PTI_MAP_2;
+
+ return get_next_as_opcode(ild, length + 1);
+ } else if (m == 0x3A) {
+ ild->map = PTI_MAP_3;
+ ild->imm1_bytes = 1;
+
+ return get_next_as_opcode(ild, length + 1);
+ } else if (bits_match(m, 0xf8, 0x38)) {
+ ild->map = PTI_MAP_INVALID;
+
+ return get_next_as_opcode(ild, length + 1);
+ } else if (m == 0x0F) { /* 3dNow */
+ ild->map = PTI_MAP_AMD3DNOW;
+ ild->imm1_bytes = 1;
+ /* The real opcode comes later, in the immediate, but we need an
+ * opcode now. */
+ ild->nominal_opcode = 0x0F;
+
+ return modrm_dec(ild, length + 1);
+ } else { /* map 1 (simple two byte opcodes) */
+ ild->nominal_opcode = m;
+ ild->map = PTI_MAP_1;
+
+ return modrm_dec(ild, length + 1);
+ }
+}
+
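+/* Prefix decoding dispatches on the next instruction byte through a table of
+ * handlers. Recognized prefixes update the ild state and continue with the
+ * following byte; any other byte is handled by prefix_done(), which applies
+ * pending REX bits and starts opcode decoding.
+ */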
+typedef int (*prefix_decoder)(struct pt_ild *ild, uint8_t length, uint8_t rex);
+static prefix_decoder prefix_table[256];
+
+static inline int prefix_decode(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t byte;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ byte = get_byte(ild, length);
+
+ return prefix_table[byte](ild, length, rex);
+}
+
+static inline int prefix_next(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ return prefix_decode(ild, length + 1, rex);
+}
+
+static int prefix_osz(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.osz = 1;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_asz(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.asz = 1;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_lock(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.lock = 1;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_f2(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.f2 = 1;
+ ild->u.s.last_f2f3 = 2;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_f3(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.f3 = 1;
+ ild->u.s.last_f2f3 = 3;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_ignore(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_done(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ if (!ild)
+ return -pte_internal;
+
+ if (rex & 0x04)
+ ild->u.s.rex_r = 1;
+ if (rex & 0x08)
+ ild->u.s.rex_w = 1;
+
+ return opcode_dec(ild, length);
+}
+
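+/* REX (0x40-0x4f) is a prefix only in 64-bit mode; in other modes these bytes
+ * are opcodes and are decoded as such.
+ */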
+static int prefix_rex(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (mode_64b(ild))
+ return prefix_next(ild, length, get_byte(ild, length));
+ else
+ return opcode_dec(ild, length);
+}
+
+static inline int prefix_vex_done(struct pt_ild *ild, uint8_t length)
+{
+ if (!ild)
+ return -pte_internal;
+
+ ild->nominal_opcode = get_byte(ild, length);
+
+ return modrm_dec(ild, length + 1);
+}
+
+static int prefix_vex_c5(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t max_bytes;
+ uint8_t p1;
+
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ max_bytes = ild->max_bytes;
+
+ /* Read the next byte to validate that this is indeed VEX. */
+ if (max_bytes <= (length + 1))
+ return -pte_bad_insn;
+
+ p1 = get_byte(ild, length + 1);
+
+ /* If p1[7:6] is not 11b in non-64-bit mode, this is LDS, not VEX. */
+ if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
+ return opcode_dec(ild, length);
+
+ /* We need at least 3 bytes
+ * - 2 for the VEX prefix and payload and
+ * - 1 for the opcode.
+ */
+ if (max_bytes < (length + 3))
+ return -pte_bad_insn;
+
+ ild->u.s.vex = 1;
+ if (p1 & 0x80)
+ ild->u.s.rex_r = 1;
+
+ ild->map = PTI_MAP_1;
+
+ /* Eat the VEX. */
+ length += 2;
+ return prefix_vex_done(ild, length);
+}
+
+static int prefix_vex_c4(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t max_bytes;
+ uint8_t p1, p2, map;
+
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ max_bytes = ild->max_bytes;
+
+ /* Read the next byte to validate that this is indeed VEX. */
+ if (max_bytes <= (length + 1))
+ return -pte_bad_insn;
+
+ p1 = get_byte(ild, length + 1);
+
+ /* If p1[7:6] is not 11b in non-64-bit mode, this is LES, not VEX. */
+ if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
+ return opcode_dec(ild, length);
+
+ /* We need at least 4 bytes
+ * - 3 for the VEX prefix and payload and
+ * - 1 for the opcode.
+ */
+ if (max_bytes < (length + 4))
+ return -pte_bad_insn;
+
+ p2 = get_byte(ild, length + 2);
+
+ ild->u.s.vex = 1;
+ if (p1 & 0x80)
+ ild->u.s.rex_r = 1;
+ if (p2 & 0x80)
+ ild->u.s.rex_w = 1;
+
+ map = p1 & 0x1f;
+ if (PTI_MAP_INVALID <= map)
+ return -pte_bad_insn;
+
+ ild->map = map;
+ if (map == PTI_MAP_3)
+ ild->imm1_bytes = 1;
+
+ /* Eat the VEX. */
+ length += 3;
+ return prefix_vex_done(ild, length);
+}
+
+static int prefix_evex(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t max_bytes;
+ uint8_t p1, p2, map;
+
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ max_bytes = ild->max_bytes;
+
+ /* Read the next byte to validate that this is indeed EVEX. */
+ if (max_bytes <= (length + 1))
+ return -pte_bad_insn;
+
+ p1 = get_byte(ild, length + 1);
+
+ /* If p1[7:6] is not 11b in non-64-bit mode, this is BOUND, not EVEX. */
+ if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
+ return opcode_dec(ild, length);
+
+ /* We need at least 5 bytes
+ * - 4 for the EVEX prefix and payload and
+ * - 1 for the opcode.
+ */
+ if (max_bytes < (length + 5))
+ return -pte_bad_insn;
+
+ p2 = get_byte(ild, length + 2);
+
+ ild->u.s.vex = 1;
+ if (p1 & 0x80)
+ ild->u.s.rex_r = 1;
+ if (p2 & 0x80)
+ ild->u.s.rex_w = 1;
+
+ map = p1 & 0x03;
+ ild->map = map;
+
+ if (map == PTI_MAP_3)
+ ild->imm1_bytes = 1;
+
+ /* Eat the EVEX. */
+ length += 4;
+ return prefix_vex_done(ild, length);
+}
+
+static void init_prefix_table(void)
+{
+ unsigned int byte;
+
+ for (byte = 0; byte <= 0xff; ++byte)
+ prefix_table[byte] = prefix_done;
+
+ prefix_table[0x66] = prefix_osz;
+ prefix_table[0x67] = prefix_asz;
+
+ /* Segment prefixes. */
+ prefix_table[0x2e] = prefix_ignore;
+ prefix_table[0x3e] = prefix_ignore;
+ prefix_table[0x26] = prefix_ignore;
+ prefix_table[0x36] = prefix_ignore;
+ prefix_table[0x64] = prefix_ignore;
+ prefix_table[0x65] = prefix_ignore;
+
+ prefix_table[0xf0] = prefix_lock;
+ prefix_table[0xf2] = prefix_f2;
+ prefix_table[0xf3] = prefix_f3;
+
+ for (byte = 0x40; byte <= 0x4f; ++byte)
+ prefix_table[byte] = prefix_rex;
+
+ prefix_table[0xc4] = prefix_vex_c4;
+ prefix_table[0xc5] = prefix_vex_c5;
+ prefix_table[0x62] = prefix_evex;
+}
+
+static int decode(struct pt_ild *ild)
+{
+ return prefix_decode(ild, 0, 0);
+}
+
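+/* Read the signed branch displacement from the instruction's raw bytes and
+ * store it, sign-extended, in @iext.
+ */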
+static int set_branch_target(struct pt_insn_ext *iext, const struct pt_ild *ild)
+{
+ if (!iext || !ild)
+ return -pte_internal;
+
+ iext->variant.branch.is_direct = 1;
+
+ if (ild->disp_bytes == 1) {
+ const int8_t *b = (const int8_t *)
+ get_byte_ptr(ild, ild->disp_pos);
+
+ iext->variant.branch.displacement = *b;
+ } else if (ild->disp_bytes == 2) {
+ const int16_t *w = (const int16_t *)
+ get_byte_ptr(ild, ild->disp_pos);
+
+ iext->variant.branch.displacement = *w;
+ } else if (ild->disp_bytes == 4) {
+ const int32_t *d = (const int32_t *)
+ get_byte_ptr(ild, ild->disp_pos);
+
+ iext->variant.branch.displacement = *d;
+ } else
+ return -pte_bad_insn;
+
+ return 0;
+}
+
+/* MAIN ENTRY POINTS */
+
+void pt_ild_init(void)
+{ /* initialization */
+ init_has_disp_regular_table();
+ init_has_sib_table();
+ init_eamode_table();
+ init_prefix_table();
+}
+
+static int pt_instruction_length_decode(struct pt_ild *ild)
+{
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.i = 0;
+ ild->imm1_bytes = 0;
+ ild->imm2_bytes = 0;
+ ild->disp_bytes = 0;
+ ild->modrm_byte = 0;
+ ild->map = PTI_MAP_INVALID;
+
+ if (!ild->mode)
+ return -pte_bad_insn;
+
+ return decode(ild);
+}
+
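+/* Classify the decoded instruction. Only branches, calls, returns, and a few
+ * other instructions of interest from opcode maps 0 and 1 are recognized;
+ * everything else is classified as ptic_other.
+ */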
+static int pt_instruction_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+ const struct pt_ild *ild)
+{
+ uint8_t opcode, map;
+
+ if (!iext || !ild)
+ return -pte_internal;
+
+ iext->iclass = PTI_INST_INVALID;
+ memset(&iext->variant, 0, sizeof(iext->variant));
+
+ insn->iclass = ptic_other;
+
+ opcode = ild->nominal_opcode;
+ map = ild->map;
+
+ if (map > PTI_MAP_1)
+ return 0; /* uninteresting */
+ if (ild->u.s.vex)
+ return 0; /* uninteresting */
+
+ /* PTI_INST_JCC, 70...7F, 0F (0x80...0x8F) */
+ if (opcode >= 0x70 && opcode <= 0x7F) {
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_JCC;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+ }
+ if (opcode >= 0x80 && opcode <= 0x8F) {
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_JCC;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+ }
+
+ switch (ild->nominal_opcode) {
+ case 0x9A:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_CALL_9A;
+ }
+ return 0;
+
+ case 0xFF:
+ if (map == PTI_MAP_0) {
+ uint8_t reg = pti_get_modrm_reg(ild);
+
+ if (reg == 2) {
+ insn->iclass = ptic_call;
+ iext->iclass = PTI_INST_CALL_FFr2;
+ } else if (reg == 3) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_CALL_FFr3;
+ } else if (reg == 4) {
+ insn->iclass = ptic_jump;
+ iext->iclass = PTI_INST_JMP_FFr4;
+ } else if (reg == 5) {
+ insn->iclass = ptic_far_jump;
+ iext->iclass = PTI_INST_JMP_FFr5;
+ }
+ }
+ return 0;
+
+ case 0xE8:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_call;
+ iext->iclass = PTI_INST_CALL_E8;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xCD:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INT;
+ }
+
+ return 0;
+
+ case 0xCC:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INT3;
+ }
+
+ return 0;
+
+ case 0xCE:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INTO;
+ }
+
+ return 0;
+
+ case 0xF1:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INT1;
+ }
+
+ return 0;
+
+ case 0xCF:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_IRET;
+ }
+ return 0;
+
+ case 0xE9:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_jump;
+ iext->iclass = PTI_INST_JMP_E9;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xEA:
+ if (map == PTI_MAP_0) {
+ /* Far jumps are treated as indirect jumps. */
+ insn->iclass = ptic_far_jump;
+ iext->iclass = PTI_INST_JMP_EA;
+ }
+ return 0;
+
+ case 0xEB:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_jump;
+ iext->iclass = PTI_INST_JMP_EB;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE3:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_JrCXZ;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE0:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_LOOPNE;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE1:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_LOOPE;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE2:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_LOOP;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0x22:
+ if (map == PTI_MAP_1)
+ if (pti_get_modrm_reg(ild) == 3)
+ if (!ild->u.s.rex_r)
+ iext->iclass = PTI_INST_MOV_CR3;
+
+ return 0;
+
+ case 0xC3:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_return;
+ iext->iclass = PTI_INST_RET_C3;
+ }
+ return 0;
+
+ case 0xC2:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_return;
+ iext->iclass = PTI_INST_RET_C2;
+ }
+ return 0;
+
+ case 0xCB:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_RET_CB;
+ }
+ return 0;
+
+ case 0xCA:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_RET_CA;
+ }
+ return 0;
+
+ case 0x05:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_SYSCALL;
+ }
+ return 0;
+
+ case 0x34:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_SYSENTER;
+ }
+ return 0;
+
+ case 0x35:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_SYSEXIT;
+ }
+ return 0;
+
+ case 0x07:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_SYSRET;
+ }
+ return 0;
+
+ case 0x01:
+ if (map == PTI_MAP_1) {
+ switch (ild->modrm_byte) {
+ case 0xc1:
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_VMCALL;
+ break;
+
+ case 0xc2:
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_VMLAUNCH;
+ break;
+
+ case 0xc3:
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_VMRESUME;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 0;
+
+ case 0xc7:
+ if (map == PTI_MAP_1 &&
+ pti_get_modrm_mod(ild) != 3 &&
+ pti_get_modrm_reg(ild) == 6)
+ iext->iclass = PTI_INST_VMPTRLD;
+
+ return 0;
+
+ case 0xae:
+ if (map == PTI_MAP_1 && ild->u.s.f3 && !ild->u.s.osz &&
+ pti_get_modrm_reg(ild) == 4) {
+ insn->iclass = ptic_ptwrite;
+ iext->iclass = PTI_INST_PTWRITE;
+ }
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext)
+{
+ struct pt_ild ild;
+ int size;
+
+ if (!insn || !iext)
+ return -pte_internal;
+
+ ild.mode = insn->mode;
+ ild.itext = insn->raw;
+ ild.max_bytes = insn->size;
+
+ size = pt_instruction_length_decode(&ild);
+ if (size < 0)
+ return size;
+
+ insn->size = (uint8_t) size;
+
+ return pt_instruction_decode(insn, iext, &ild);
+}
diff --git a/libipt/src/pt_image.c b/libipt/src/pt_image.c
new file mode 100644
index 000000000000..b22c62601a56
--- /dev/null
+++ b/libipt/src/pt_image.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image.h"
+#include "pt_section.h"
+#include "pt_asid.h"
+#include "pt_image_section_cache.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
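+/* Duplicate a string. Returns NULL if @str is NULL or allocation fails. */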
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ return strcpy(dup, str);
+}
+
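+/* Allocate a new section list element holding a reference to @section, to be
+ * loaded at @vaddr. Returns NULL if allocation or taking the reference fails.
+ */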
+static struct pt_section_list *pt_mk_section_list(struct pt_section *section,
+ const struct pt_asid *asid,
+ uint64_t vaddr,
+ uint64_t offset,
+ uint64_t size, int isid)
+{
+ struct pt_section_list *list;
+ int errcode;
+
+ list = malloc(sizeof(*list));
+ if (!list)
+ return NULL;
+
+ memset(list, 0, sizeof(*list));
+
+ errcode = pt_section_get(section);
+ if (errcode < 0)
+ goto out_mem;
+
+ pt_msec_init(&list->section, section, asid, vaddr, offset, size);
+ list->isid = isid;
+
+ return list;
+
+out_mem:
+ free(list);
+ return NULL;
+}
+
+static void pt_section_list_free(struct pt_section_list *list)
+{
+ if (!list)
+ return;
+
+ pt_section_put(list->section.section);
+ pt_msec_fini(&list->section);
+ free(list);
+}
+
+static void pt_section_list_free_tail(struct pt_section_list *list)
+{
+ while (list) {
+ struct pt_section_list *trash;
+
+ trash = list;
+ list = list->next;
+
+ pt_section_list_free(trash);
+ }
+}
+
+void pt_image_init(struct pt_image *image, const char *name)
+{
+ if (!image)
+ return;
+
+ memset(image, 0, sizeof(*image));
+
+ image->name = dupstr(name);
+}
+
+void pt_image_fini(struct pt_image *image)
+{
+ if (!image)
+ return;
+
+ pt_section_list_free_tail(image->sections);
+ free(image->name);
+
+ memset(image, 0, sizeof(*image));
+}
+
+struct pt_image *pt_image_alloc(const char *name)
+{
+ struct pt_image *image;
+
+ image = malloc(sizeof(*image));
+ if (image)
+ pt_image_init(image, name);
+
+ return image;
+}
+
+void pt_image_free(struct pt_image *image)
+{
+ pt_image_fini(image);
+ free(image);
+}
+
+const char *pt_image_name(const struct pt_image *image)
+{
+ if (!image)
+ return NULL;
+
+ return image->name;
+}
+
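+/* Add @section, loaded at @vaddr, to @image's section list. Overlapping
+ * sections in the same address space are removed and their non-overlapping
+ * remainders are re-added.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */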
+int pt_image_add(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr, int isid)
+{
+ struct pt_section_list **list, *next, *removed, *new;
+ uint64_t size, begin, end;
+ int errcode;
+
+ if (!image || !section)
+ return -pte_internal;
+
+ size = pt_section_size(section);
+ begin = vaddr;
+ end = begin + size;
+
+ next = pt_mk_section_list(section, asid, begin, 0ull, size, isid);
+ if (!next)
+ return -pte_nomem;
+
+ removed = NULL;
+ errcode = 0;
+
+ /* Check for overlaps while we move to the end of the list. */
+ list = &(image->sections);
+ while (*list) {
+ const struct pt_mapped_section *msec;
+ const struct pt_asid *masid;
+ struct pt_section_list *current;
+ struct pt_section *lsec;
+ uint64_t lbegin, lend, loff;
+
+ current = *list;
+ msec = &current->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, asid);
+ if (errcode < 0)
+ break;
+
+ if (!errcode) {
+ list = &((*list)->next);
+ continue;
+ }
+
+ lbegin = pt_msec_begin(msec);
+ lend = pt_msec_end(msec);
+
+ if ((end <= lbegin) || (lend <= begin)) {
+ list = &((*list)->next);
+ continue;
+ }
+
+ /* The new section overlaps with @msec's section. */
+ lsec = pt_msec_section(msec);
+ loff = pt_msec_offset(msec);
+
+ /* We remove @msec and insert new sections for the remaining
+ * parts, if any. Those new sections are not mapped initially
+ * and need to be added to the end of the section list.
+ */
+ *list = current->next;
+
+ /* Keep a list of removed sections so we can re-add them in case
+ * of errors.
+ */
+ current->next = removed;
+ removed = current;
+
+ /* Add a section covering the remaining bytes at the front. */
+ if (lbegin < begin) {
+ new = pt_mk_section_list(lsec, masid, lbegin, loff,
+ begin - lbegin, current->isid);
+ if (!new) {
+ errcode = -pte_nomem;
+ break;
+ }
+
+ new->next = next;
+ next = new;
+ }
+
+ /* Add a section covering the remaining bytes at the back. */
+ if (end < lend) {
+ new = pt_mk_section_list(lsec, masid, end,
+ loff + (end - lbegin),
+ lend - end, current->isid);
+ if (!new) {
+ errcode = -pte_nomem;
+ break;
+ }
+
+ new->next = next;
+ next = new;
+ }
+ }
+
+ if (errcode < 0) {
+ pt_section_list_free_tail(next);
+
+ /* Re-add removed sections to the tail of the section list. */
+ for (; *list; list = &((*list)->next))
+ ;
+
+ *list = removed;
+ return errcode;
+ }
+
+ pt_section_list_free_tail(removed);
+
+ *list = next;
+ return 0;
+}
+
+int pt_image_remove(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_section_list **list;
+
+ if (!image || !section)
+ return -pte_internal;
+
+ for (list = &image->sections; *list; list = &((*list)->next)) {
+ struct pt_mapped_section *msec;
+ const struct pt_section *sec;
+ const struct pt_asid *masid;
+ struct pt_section_list *trash;
+ uint64_t begin;
+ int errcode;
+
+ trash = *list;
+ msec = &trash->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, asid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!errcode)
+ continue;
+
+ begin = pt_msec_begin(msec);
+ sec = pt_msec_section(msec);
+ if (sec == section && begin == vaddr) {
+ *list = trash->next;
+ pt_section_list_free(trash);
+
+ return 0;
+ }
+ }
+
+ return -pte_bad_image;
+}
+
+int pt_image_add_file(struct pt_image *image, const char *filename,
+ uint64_t offset, uint64_t size,
+ const struct pt_asid *uasid, uint64_t vaddr)
+{
+ struct pt_section *section;
+ struct pt_asid asid;
+ int errcode;
+
+ if (!image || !filename)
+ return -pte_invalid;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ section = pt_mk_section(filename, offset, size);
+ if (!section)
+ return -pte_invalid;
+
+ errcode = pt_image_add(image, section, &asid, vaddr, 0);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+
+ /* The image list got its own reference; let's drop ours. */
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
+int pt_image_copy(struct pt_image *image, const struct pt_image *src)
+{
+ struct pt_section_list *list;
+ int ignored;
+
+ if (!image || !src)
+ return -pte_invalid;
+
+ /* There is nothing to do if we copy an image to itself.
+ *
+ * Besides, pt_image_add() may move sections around, which would
+ * interfere with our section iteration.
+ */
+ if (image == src)
+ return 0;
+
+ ignored = 0;
+ for (list = src->sections; list; list = list->next) {
+ int errcode;
+
+ errcode = pt_image_add(image, list->section.section,
+ &list->section.asid,
+ list->section.vaddr,
+ list->isid);
+ if (errcode < 0)
+ ignored += 1;
+ }
+
+ return ignored;
+}
+
+int pt_image_remove_by_filename(struct pt_image *image, const char *filename,
+ const struct pt_asid *uasid)
+{
+ struct pt_section_list **list;
+ struct pt_asid asid;
+ int errcode, removed;
+
+ if (!image || !filename)
+ return -pte_invalid;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ removed = 0;
+ for (list = &image->sections; *list;) {
+ struct pt_mapped_section *msec;
+ const struct pt_section *sec;
+ const struct pt_asid *masid;
+ struct pt_section_list *trash;
+ const char *tname;
+
+ trash = *list;
+ msec = &trash->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, &asid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!errcode) {
+ list = &trash->next;
+ continue;
+ }
+
+ sec = pt_msec_section(msec);
+ tname = pt_section_filename(sec);
+
+ if (tname && (strcmp(tname, filename) == 0)) {
+ *list = trash->next;
+ pt_section_list_free(trash);
+
+ removed += 1;
+ } else
+ list = &trash->next;
+ }
+
+ return removed;
+}
+
+int pt_image_remove_by_asid(struct pt_image *image,
+ const struct pt_asid *uasid)
+{
+ struct pt_section_list **list;
+ struct pt_asid asid;
+ int errcode, removed;
+
+ if (!image)
+ return -pte_invalid;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ removed = 0;
+ for (list = &image->sections; *list;) {
+ struct pt_mapped_section *msec;
+ const struct pt_asid *masid;
+ struct pt_section_list *trash;
+
+ trash = *list;
+ msec = &trash->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, &asid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!errcode) {
+ list = &trash->next;
+ continue;
+ }
+
+ *list = trash->next;
+ pt_section_list_free(trash);
+
+ removed += 1;
+ }
+
+ return removed;
+}
+
+int pt_image_set_callback(struct pt_image *image,
+ read_memory_callback_t *callback, void *context)
+{
+ if (!image)
+ return -pte_invalid;
+
+ image->readmem.callback = callback;
+ image->readmem.context = context;
+
+ return 0;
+}
+
+static int pt_image_read_callback(struct pt_image *image, int *isid,
+ uint8_t *buffer, uint16_t size,
+ const struct pt_asid *asid, uint64_t addr)
+{
+ read_memory_callback_t *callback;
+
+ if (!image || !isid)
+ return -pte_internal;
+
+ callback = image->readmem.callback;
+ if (!callback)
+ return -pte_nomap;
+
+ *isid = 0;
+
+ return callback(buffer, size, asid, addr, image->readmem.context);
+}
+
+/* Check whether a mapped section contains an address.
+ *
+ * Returns zero if @msec contains @vaddr.
+ * Returns a negative error code otherwise.
+ * Returns -pte_nomap if @msec does not contain @vaddr.
+ */
+static inline int pt_image_check_msec(const struct pt_mapped_section *msec,
+ const struct pt_asid *asid,
+ uint64_t vaddr)
+{
+ const struct pt_asid *masid;
+ uint64_t begin, end;
+ int errcode;
+
+ if (!msec)
+ return -pte_internal;
+
+ begin = pt_msec_begin(msec);
+ end = pt_msec_end(msec);
+ if (vaddr < begin || end <= vaddr)
+ return -pte_nomap;
+
+ masid = pt_msec_asid(msec);
+ errcode = pt_asid_match(masid, asid);
+ if (errcode <= 0) {
+ if (!errcode)
+ errcode = -pte_nomap;
+
+ return errcode;
+ }
+
+ return 0;
+}
+
+/* Find the section containing a given address in a given address space.
+ *
+ * On success, the found section is moved to the front of the section list.
+ * If caching is enabled, maps the section.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_image_fetch_section(struct pt_image *image,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_section_list **start, **list;
+
+ if (!image)
+ return -pte_internal;
+
+ start = &image->sections;
+ for (list = start; *list;) {
+ struct pt_mapped_section *msec;
+ struct pt_section_list *elem;
+ int errcode;
+
+ elem = *list;
+ msec = &elem->section;
+
+ errcode = pt_image_check_msec(msec, asid, vaddr);
+ if (errcode < 0) {
+ if (errcode != -pte_nomap)
+ return errcode;
+
+ list = &elem->next;
+ continue;
+ }
+
+ /* Move the section to the front if it isn't already. */
+ if (list != start) {
+ *list = elem->next;
+ elem->next = *start;
+ *start = elem;
+ }
+
+ return 0;
+ }
+
+ return -pte_nomap;
+}
+
+int pt_image_read(struct pt_image *image, int *isid, uint8_t *buffer,
+ uint16_t size, const struct pt_asid *asid, uint64_t addr)
+{
+ struct pt_mapped_section *msec;
+ struct pt_section_list *slist;
+ struct pt_section *section;
+ int errcode, status;
+
+ if (!image || !isid)
+ return -pte_internal;
+
+ errcode = pt_image_fetch_section(image, asid, addr);
+ if (errcode < 0) {
+ if (errcode != -pte_nomap)
+ return errcode;
+
+ return pt_image_read_callback(image, isid, buffer, size, asid,
+ addr);
+ }
+
+ slist = image->sections;
+ if (!slist)
+ return -pte_internal;
+
+ *isid = slist->isid;
+ msec = &slist->section;
+
+ section = pt_msec_section(msec);
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_msec_read(msec, buffer, size, addr);
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (status < 0) {
+ if (status != -pte_nomap)
+ return status;
+
+ return pt_image_read_callback(image, isid, buffer, size, asid,
+ addr);
+ }
+
+ return status;
+}
+
+int pt_image_add_cached(struct pt_image *image,
+ struct pt_image_section_cache *iscache, int isid,
+ const struct pt_asid *uasid)
+{
+ struct pt_section *section;
+ struct pt_asid asid;
+ uint64_t vaddr;
+ int errcode, status;
+
+ if (!image || !iscache)
+ return -pte_invalid;
+
+ errcode = pt_iscache_lookup(iscache, &section, &vaddr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_image_add(image, section, &asid, vaddr, isid);
+
+ /* We grab a reference when we add the section. Drop the one we
+ * obtained from cache lookup.
+ */
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+int pt_image_find(struct pt_image *image, struct pt_mapped_section *usec,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_mapped_section *msec;
+ struct pt_section_list *slist;
+ struct pt_section *section;
+ int errcode;
+
+ if (!image || !usec)
+ return -pte_internal;
+
+ errcode = pt_image_fetch_section(image, asid, vaddr);
+ if (errcode < 0)
+ return errcode;
+
+ slist = image->sections;
+ if (!slist)
+ return -pte_internal;
+
+ msec = &slist->section;
+ section = pt_msec_section(msec);
+
+ errcode = pt_section_get(section);
+ if (errcode < 0)
+ return errcode;
+
+ *usec = *msec;
+
+ return slist->isid;
+}
+
+int pt_image_validate(const struct pt_image *image,
+ const struct pt_mapped_section *usec, uint64_t vaddr,
+ int isid)
+{
+ const struct pt_section_list *slist;
+ uint64_t begin, end;
+ int status;
+
+ if (!image || !usec)
+ return -pte_internal;
+
+ /* Check that @vaddr lies within @usec. */
+ begin = pt_msec_begin(usec);
+ end = pt_msec_end(usec);
+ if (vaddr < begin || end <= vaddr)
+ return -pte_nomap;
+
+ /* We assume that @usec is a copy of the top of our stack and accept
+ * sporadic validation fails if it isn't, e.g. because it has moved
+ * down.
+ *
+ * A failed validation requires decoders to re-fetch the section so it
+ * only results in a (relatively small) performance loss.
+ */
+ slist = image->sections;
+ if (!slist)
+ return -pte_nomap;
+
+ if (slist->isid != isid)
+ return -pte_nomap;
+
+ status = memcmp(&slist->section, usec, sizeof(*usec));
+ if (status)
+ return -pte_nomap;
+
+ return 0;
+}
diff --git a/libipt/src/pt_image_section_cache.c b/libipt/src/pt_image_section_cache.c
new file mode 100644
index 000000000000..f380890ee0b4
--- /dev/null
+++ b/libipt/src/pt_image_section_cache.c
@@ -0,0 +1,1091 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image_section_cache.h"
+#include "pt_section.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ return strcpy(dup, str);
+}
+
+int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
+{
+ if (!iscache)
+ return -pte_internal;
+
+ memset(iscache, 0, sizeof(*iscache));
+ iscache->limit = UINT64_MAX;
+ if (name) {
+ iscache->name = dupstr(name);
+ if (!iscache->name)
+ return -pte_nomem;
+ }
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_init(&iscache->lock, mtx_plain);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+void pt_iscache_fini(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return;
+
+ (void) pt_iscache_clear(iscache);
+ free(iscache->name);
+
+#if defined(FEATURE_THREADS)
+
+ mtx_destroy(&iscache->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+}
+
+static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&iscache->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&iscache->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
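+/* Image section identifiers are one-based indices into the entries array. */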
+static inline int isid_from_index(uint16_t index)
+{
+ return index + 1;
+}
+
+static int pt_iscache_expand(struct pt_image_section_cache *iscache)
+{
+ struct pt_iscache_entry *entries;
+ uint16_t capacity, target;
+
+ if (!iscache)
+ return -pte_internal;
+
+ capacity = iscache->capacity;
+ target = capacity + 8;
+
+ /* Check for overflows. */
+ if (target < capacity)
+ return -pte_nomem;
+
+ entries = realloc(iscache->entries, target * sizeof(*entries));
+ if (!entries)
+ return -pte_nomem;
+
+ iscache->capacity = target;
+ iscache->entries = entries;
+ return 0;
+}
+
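+/* Search @iscache for a section that exactly matches @filename, @offset,
+ * @size, and @laddr.
+ *
+ * The caller must lock @iscache.
+ *
+ * Returns a positive isid if a match is found, zero if not, a negative
+ * pt_error_code otherwise.
+ */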
+static int pt_iscache_find_locked(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t laddr)
+{
+ uint16_t idx, end;
+
+ if (!iscache || !filename)
+ return -pte_internal;
+
+ end = iscache->size;
+ for (idx = 0; idx < end; ++idx) {
+ const struct pt_iscache_entry *entry;
+ const struct pt_section *section;
+ const char *sec_filename;
+ uint64_t sec_offset, sec_size;
+
+ entry = &iscache->entries[idx];
+
+ /* We do not zero-initialize the array - a NULL check is
+ * pointless.
+ */
+ section = entry->section;
+ sec_filename = pt_section_filename(section);
+ sec_offset = pt_section_offset(section);
+ sec_size = pt_section_size(section);
+
+ if (entry->laddr != laddr)
+ continue;
+
+ if (sec_offset != offset)
+ continue;
+
+ if (sec_size != size)
+ continue;
+
+ /* We should not have a section without a filename. */
+ if (!sec_filename)
+ return -pte_internal;
+
+ if (strcmp(sec_filename, filename) != 0)
+ continue;
+
+ return isid_from_index(idx);
+ }
+
+ return 0;
+}
+
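+/* Unmap the sections on the @lru list and free the list entries.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */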
+static int pt_iscache_lru_free(struct pt_iscache_lru_entry *lru)
+{
+ while (lru) {
+ struct pt_iscache_lru_entry *trash;
+ int errcode;
+
+ trash = lru;
+ lru = lru->next;
+
+ errcode = pt_section_unmap(trash->section);
+ if (errcode < 0)
+ return errcode;
+
+ free(trash);
+ }
+
+ return 0;
+}
+
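+/* Prune @iscache->lru down to @iscache->limit.
+ *
+ * Cut off the tail of the LRU list that exceeds the limit and provide it in
+ * @tail so the caller can free it after unlocking.
+ *
+ * The caller must lock @iscache.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */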
+static int pt_iscache_lru_prune(struct pt_image_section_cache *iscache,
+ struct pt_iscache_lru_entry **tail)
+{
+ struct pt_iscache_lru_entry *lru, **pnext;
+ uint64_t limit, used;
+
+ if (!iscache || !tail)
+ return -pte_internal;
+
+ limit = iscache->limit;
+ used = 0ull;
+
+ pnext = &iscache->lru;
+ for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
+
+ used += lru->size;
+ if (used <= limit)
+ continue;
+
+ /* The cache got too big; prune it starting from @lru. */
+ iscache->used = used - lru->size;
+ *pnext = NULL;
+ *tail = lru;
+
+ return 0;
+ }
+
+ /* We shouldn't prune the cache unnecessarily. */
+ return -pte_internal;
+}
+
+/* Add @section to the front of @iscache->lru.
+ *
+ * Returns a positive integer if we need to prune the cache.
+ * Returns zero if we don't need to prune the cache.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_new(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *lru;
+ uint64_t memsize, used, total, limit;
+ int errcode;
+
+ if (!iscache)
+ return -pte_internal;
+
+ errcode = pt_section_memsize(section, &memsize);
+ if (errcode < 0)
+ return errcode;
+
+ /* Don't try to add the section if it is too big. We'd prune it again
+ * together with all other sections in our cache.
+ */
+ limit = iscache->limit;
+ if (limit < memsize)
+ return 0;
+
+ errcode = pt_section_map_share(section);
+ if (errcode < 0)
+ return errcode;
+
+ lru = malloc(sizeof(*lru));
+ if (!lru) {
+ (void) pt_section_unmap(section);
+ return -pte_nomem;
+ }
+
+ lru->section = section;
+ lru->size = memsize;
+
+ lru->next = iscache->lru;
+ iscache->lru = lru;
+
+ used = iscache->used;
+ total = used + memsize;
+ if (total < used || total < memsize)
+ return -pte_overflow;
+
+ iscache->used = total;
+
+ return (limit < total) ? 1 : 0;
+}
+
+/* Add or move @section to the front of @iscache->lru.
+ *
+ * Returns a positive integer if we need to prune the cache.
+ * Returns zero if we don't need to prune the cache.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_add(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *lru, **pnext;
+
+ if (!iscache)
+ return -pte_internal;
+
+ pnext = &iscache->lru;
+ for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
+
+ if (lru->section != section)
+ continue;
+
+ /* We found it in the cache. Move it to the front. */
+ *pnext = lru->next;
+ lru->next = iscache->lru;
+ iscache->lru = lru;
+
+ return 0;
+ }
+
+ /* We didn't find it in the cache. Add it. */
+	return pt_iscache_lru_new(iscache, section);
+}
+
+
+/* Remove @section from @iscache->lru.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_remove(struct pt_image_section_cache *iscache,
+ const struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *lru, **pnext;
+
+ if (!iscache)
+ return -pte_internal;
+
+ pnext = &iscache->lru;
+ for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
+
+ if (lru->section != section)
+ continue;
+
+ /* We found it in the cache. Remove it. */
+ *pnext = lru->next;
+ lru->next = NULL;
+ break;
+ }
+
+ return pt_iscache_lru_free(lru);
+}
+
+
+/* Add or move @section to the front of @iscache->lru and update its size.
+ *
+ * Returns a positive integer if we need to prune the cache.
+ * Returns zero if we don't need to prune the cache.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t memsize)
+{
+ struct pt_iscache_lru_entry *lru;
+ uint64_t oldsize, used;
+ int status;
+
+ if (!iscache)
+ return -pte_internal;
+
+ status = pt_iscache_lru_add(iscache, section);
+ if (status < 0)
+ return status;
+
+ lru = iscache->lru;
+ if (!lru) {
+ if (status)
+ return -pte_internal;
+ return 0;
+ }
+
+ /* If @section is cached, it must be first.
+ *
+ * We may choose not to cache it, though, e.g. if it is too big.
+ */
+ if (lru->section != section) {
+ if (iscache->limit < memsize)
+ return 0;
+
+ return -pte_internal;
+ }
+
+ oldsize = lru->size;
+ lru->size = memsize;
+
+ /* If we need to prune anyway, we're done. */
+ if (status)
+ return status;
+
+ used = iscache->used;
+ used -= oldsize;
+ used += memsize;
+
+ iscache->used = used;
+
+ return (iscache->limit < used) ? 1 : 0;
+}
+
+/* Clear @iscache->lru.
+ *
+ * Unlike other iscache_lru functions, the caller does not lock @iscache.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_clear(struct pt_image_section_cache *iscache)
+{
+ struct pt_iscache_lru_entry *lru;
+ int errcode;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ lru = iscache->lru;
+ iscache->lru = NULL;
+ iscache->used = 0ull;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_iscache_lru_free(lru);
+}
+
+/* Search @iscache for a partial or exact match of a section with @filename,
+ * @offset, and @size loaded at @laddr and return the corresponding index or
+ * @iscache->size if no match is found.
+ *
+ * The caller must lock @iscache.
+ *
+ * Returns a non-negative index on success, a negative pt_error_code otherwise.
+ */
+static int
+pt_iscache_find_section_locked(const struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t laddr)
+{
+ const struct pt_section *section;
+ uint16_t idx, end;
+ int match;
+
+ if (!iscache || !filename)
+ return -pte_internal;
+
+ section = NULL;
+ match = end = iscache->size;
+ for (idx = 0; idx < end; ++idx) {
+ const struct pt_iscache_entry *entry;
+ const struct pt_section *sec;
+
+ entry = &iscache->entries[idx];
+
+ /* We do not zero-initialize the array - a NULL check is
+ * pointless.
+ */
+ sec = entry->section;
+
+ /* Avoid redundant match checks. */
+ if (sec != section) {
+ const char *sec_filename;
+
+ /* We don't have duplicates. Skip the check. */
+ if (section)
+ continue;
+
+ if (offset != pt_section_offset(sec))
+ continue;
+
+ if (size != pt_section_size(sec))
+ continue;
+
+ sec_filename = pt_section_filename(sec);
+ if (!sec_filename)
+ return -pte_internal;
+
+ if (strcmp(filename, sec_filename) != 0)
+ continue;
+
+ /* Use the cached section instead. */
+ section = sec;
+ match = idx;
+ }
+
+ /* If we didn't continue, @section == @sec and we have a match.
+ *
+ * If we also find a matching load address, we're done.
+ */
+ if (laddr == entry->laddr)
+ return idx;
+ }
+
+ return match;
+}
+
+int pt_iscache_add(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t laddr)
+{
+ const char *filename;
+ uint64_t offset, size;
+ uint16_t idx;
+ int errcode;
+
+ if (!iscache || !section)
+ return -pte_internal;
+
+ /* We must have a filename for @section. */
+ filename = pt_section_filename(section);
+ if (!filename)
+ return -pte_internal;
+
+ offset = pt_section_offset(section);
+ size = pt_section_size(section);
+
+ /* Adding a section is slightly complicated by a potential deadlock
+ * scenario:
+ *
+ * - in order to add a section, we need to attach to it, which
+ * requires taking the section's attach lock.
+ *
+ * - if we are already attached to it, we may receive on-map
+ * notifications, which will be sent while holding the attach lock
+ * and require taking the iscache lock.
+ *
+ * Hence we can't attach to a section while holding the iscache lock.
+ *
+ *
+ * We therefore attach to @section first and then lock @iscache.
+ *
+ * This opens a small window where an existing @section may be removed
+ * from @iscache and replaced by a new matching section. We would want
+ * to share that new section rather than adding a duplicate @section.
+ *
+ * After locking @iscache, we therefore check for existing matching
+ * sections and, if one is found, update @section. This involves
+ * detaching from @section and attaching to the existing section.
+ *
+ * And for this, we will have to temporarily unlock @iscache again.
+ */
+ errcode = pt_section_get(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_attach(section, iscache);
+ if (errcode < 0)
+ goto out_put;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ goto out_detach;
+
+ /* We may need to repeat this step.
+ *
+ * Typically we don't and this takes only a single iteration. One
+ * scenario where we do repeat this is when adding a section with an
+ * out-of-bounds size.
+ *
+ * We will not find a matching section in pt_iscache_add_file() so we
+ * create a new section. This will have its size reduced to match the
+ * actual file size.
+ *
+ * For this reduced size, we may now find an existing section, and we
+ * will take another trip in the below loop.
+ */
+ for (;;) {
+ const struct pt_iscache_entry *entry;
+ struct pt_section *sec;
+ int match;
+
+ /* Find an existing section matching @section that we'd share
+ * rather than adding @section.
+ */
+ match = pt_iscache_find_section_locked(iscache, filename,
+ offset, size, laddr);
+ if (match < 0) {
+ errcode = match;
+ goto out_unlock_detach;
+ }
+
+ /* We're done if we have not found a matching section. */
+ if (iscache->size <= match)
+ break;
+
+ entry = &iscache->entries[match];
+
+ /* We're also done if we found the same section again.
+ *
+ * We further check for a perfect match. In that case, we don't
+ * need to insert anything, at all.
+ */
+ sec = entry->section;
+ if (sec == section) {
+ if (entry->laddr == laddr) {
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ goto out_detach;
+
+ errcode = pt_section_detach(section, iscache);
+ if (errcode < 0)
+ goto out_lru;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return isid_from_index((uint16_t) match);
+ }
+
+ break;
+ }
+
+ /* We update @section to share the existing @sec.
+ *
+ * This requires detaching from @section, which, in turn,
+ * requires temporarily unlocking @iscache.
+ *
+ * We further need to remove @section from @iscache->lru.
+ */
+ errcode = pt_section_get(sec);
+ if (errcode < 0)
+ goto out_unlock_detach;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(sec);
+ goto out_detach;
+ }
+
+ errcode = pt_section_detach(section, iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(sec);
+ goto out_lru;
+ }
+
+ errcode = pt_section_attach(sec, iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(sec);
+ goto out_lru;
+ }
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ /* Complete the swap for cleanup. */
+ section = sec;
+ goto out_detach;
+ }
+
+ /* We may have received on-map notifications for @section and we
+ * may have added @section to @iscache->lru.
+ *
+ * Since we're still holding a reference to it, no harm has been
+ * done. But we need to remove it before we drop our reference.
+ */
+ errcode = pt_iscache_lru_remove(iscache, section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ /* Complete the swap for cleanup. */
+ section = sec;
+ goto out_unlock_detach;
+ }
+
+ /* Drop the reference to @section. */
+ errcode = pt_section_put(section);
+ if (errcode < 0) {
+ /* Complete the swap for cleanup. */
+ section = sec;
+ goto out_unlock_detach;
+ }
+
+ /* Swap sections.
+ *
+ * We will try again in the next iteration.
+ */
+ section = sec;
+ }
+
+ /* Expand the cache, if necessary. */
+ if (iscache->capacity <= iscache->size) {
+ /* We must never exceed the capacity. */
+ if (iscache->capacity < iscache->size) {
+ errcode = -pte_internal;
+ goto out_unlock_detach;
+ }
+
+ errcode = pt_iscache_expand(iscache);
+ if (errcode < 0)
+ goto out_unlock_detach;
+
+ /* Make sure it is big enough, now. */
+ if (iscache->capacity <= iscache->size) {
+ errcode = -pte_internal;
+ goto out_unlock_detach;
+ }
+ }
+
+ /* Insert a new entry for @section at @laddr.
+ *
+ * This hands both attach and reference over to @iscache. We will
+ * detach and drop the reference again when the entry is removed.
+ */
+ idx = iscache->size++;
+
+ iscache->entries[idx].section = section;
+ iscache->entries[idx].laddr = laddr;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return isid_from_index(idx);
+
+ out_unlock_detach:
+ (void) pt_iscache_unlock(iscache);
+
+ out_detach:
+ (void) pt_section_detach(section, iscache);
+
+ out_lru:
+ (void) pt_iscache_lru_clear(iscache);
+
+ out_put:
+ (void) pt_section_put(section);
+
+ return errcode;
+}
+
+int pt_iscache_find(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset, uint64_t size,
+ uint64_t laddr)
+{
+ int errcode, isid;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr);
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return isid;
+}
+
+int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr, int isid)
+{
+ uint16_t index;
+ int errcode, status;
+
+ if (!iscache || !section || !laddr)
+ return -pte_internal;
+
+ if (isid <= 0)
+ return -pte_bad_image;
+
+ isid -= 1;
+ if (isid > UINT16_MAX)
+ return -pte_internal;
+
+ index = (uint16_t) isid;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ if (iscache->size <= index)
+ status = -pte_bad_image;
+ else {
+ const struct pt_iscache_entry *entry;
+
+ entry = &iscache->entries[index];
+ *section = entry->section;
+ *laddr = entry->laddr;
+
+ status = pt_section_get(*section);
+ }
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+int pt_iscache_clear(struct pt_image_section_cache *iscache)
+{
+ struct pt_iscache_lru_entry *lru;
+ struct pt_iscache_entry *entries;
+ uint16_t idx, end;
+ int errcode;
+
+ if (!iscache)
+ return -pte_internal;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ entries = iscache->entries;
+ end = iscache->size;
+ lru = iscache->lru;
+
+ iscache->entries = NULL;
+ iscache->capacity = 0;
+ iscache->size = 0;
+ iscache->lru = NULL;
+ iscache->used = 0ull;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_iscache_lru_free(lru);
+ if (errcode < 0)
+ return errcode;
+
+ for (idx = 0; idx < end; ++idx) {
+ struct pt_section *section;
+
+ section = entries[idx].section;
+
+ /* We do not zero-initialize the array - a NULL check is
+ * pointless.
+ */
+ errcode = pt_section_detach(section, iscache);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ free(entries);
+ return 0;
+}
+
+struct pt_image_section_cache *pt_iscache_alloc(const char *name)
+{
+ struct pt_image_section_cache *iscache;
+
+ iscache = malloc(sizeof(*iscache));
+ if (iscache)
+ pt_iscache_init(iscache, name);
+
+ return iscache;
+}
+
+void pt_iscache_free(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return;
+
+ pt_iscache_fini(iscache);
+ free(iscache);
+}
+
+int pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit)
+{
+ struct pt_iscache_lru_entry *tail;
+ int errcode, status;
+
+ if (!iscache)
+ return -pte_invalid;
+
+ status = 0;
+ tail = NULL;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ iscache->limit = limit;
+ if (limit < iscache->used)
+ status = pt_iscache_lru_prune(iscache, &tail);
+
+ errcode = pt_iscache_unlock(iscache);
+
+ if (errcode < 0 || status < 0)
+ return (status < 0) ? status : errcode;
+
+ return pt_iscache_lru_free(tail);
+}
+
+const char *pt_iscache_name(const struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return NULL;
+
+ return iscache->name;
+}
+
+int pt_iscache_add_file(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset, uint64_t size,
+ uint64_t vaddr)
+{
+ struct pt_section *section;
+ int errcode, match, isid;
+
+ if (!iscache || !filename)
+ return -pte_invalid;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ match = pt_iscache_find_section_locked(iscache, filename, offset,
+ size, vaddr);
+ if (match < 0) {
+ (void) pt_iscache_unlock(iscache);
+ return match;
+ }
+
+ /* If we found a perfect match, we will share the existing entry.
+ *
+ * If we found a section, we need to grab a reference before we unlock.
+ *
+ * If we didn't find a matching section, we create a new section, which
+ * implicitly gives us a reference to it.
+ */
+ if (match < iscache->size) {
+ const struct pt_iscache_entry *entry;
+
+ entry = &iscache->entries[match];
+ if (entry->laddr == vaddr) {
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return isid_from_index((uint16_t) match);
+ }
+
+ section = entry->section;
+
+ errcode = pt_section_get(section);
+ if (errcode < 0) {
+ (void) pt_iscache_unlock(iscache);
+ return errcode;
+ }
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+ } else {
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ section = pt_mk_section(filename, offset, size);
+ if (!section)
+ return -pte_invalid;
+ }
+
+ /* We unlocked @iscache and hold a reference to @section. */
+ isid = pt_iscache_add(iscache, section, vaddr);
+
+ /* We grab a reference when we add the section. Drop the one we
+ * obtained before.
+ */
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return isid;
+}
+
+
+int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
+ uint64_t size, int isid, uint64_t vaddr)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, status;
+
+ if (!iscache || !buffer || !size)
+ return -pte_invalid;
+
+ errcode = pt_iscache_lookup(iscache, &section, &laddr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (vaddr < laddr) {
+ (void) pt_section_put(section);
+ return -pte_nomap;
+ }
+
+ vaddr -= laddr;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+
+ /* We truncate the read if it gets too big. The user is expected to
+ * issue further reads for the remaining part.
+ */
+ if (UINT16_MAX < size)
+ size = UINT16_MAX;
+
+ status = pt_section_read(section, buffer, (uint16_t) size, vaddr);
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
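+/* Handle an on-map notification for @section.
+ *
+ * Add @section to @iscache->lru and prune the cache if it grew beyond its
+ * limit.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */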
+int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *tail;
+ int errcode, status;
+
+ tail = NULL;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_iscache_lru_add(iscache, section);
+ if (status > 0)
+ status = pt_iscache_lru_prune(iscache, &tail);
+
+ errcode = pt_iscache_unlock(iscache);
+
+ if (errcode < 0 || status < 0)
+ return (status < 0) ? status : errcode;
+
+ return pt_iscache_lru_free(tail);
+}
+
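+/* Handle a resize notification for @section.
+ *
+ * Update the section's size on @iscache->lru and prune the cache if it grew
+ * beyond its limit.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */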
+int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t memsize)
+{
+ struct pt_iscache_lru_entry *tail;
+ int errcode, status;
+
+ tail = NULL;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_iscache_lru_resize(iscache, section, memsize);
+ if (status > 0)
+ status = pt_iscache_lru_prune(iscache, &tail);
+
+ errcode = pt_iscache_unlock(iscache);
+
+ if (errcode < 0 || status < 0)
+ return (status < 0) ? status : errcode;
+
+ return pt_iscache_lru_free(tail);
+}
diff --git a/libipt/src/pt_insn.c b/libipt/src/pt_insn.c
new file mode 100644
index 000000000000..0a41c4bf391e
--- /dev/null
+++ b/libipt/src/pt_insn.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_insn.h"
+#include "pt_ild.h"
+#include "pt_image.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
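+/* Check whether @insn/@iext may change the current privilege level.
+ *
+ * Returns non-zero if it may, zero if it may not.
+ */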
+int pt_insn_changes_cpl(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) insn;
+
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return 0;
+
+ case PTI_INST_INT:
+ case PTI_INST_INT3:
+ case PTI_INST_INT1:
+ case PTI_INST_INTO:
+ case PTI_INST_IRET:
+ case PTI_INST_SYSCALL:
+ case PTI_INST_SYSENTER:
+ case PTI_INST_SYSEXIT:
+ case PTI_INST_SYSRET:
+ return 1;
+ }
+}
+
+int pt_insn_changes_cr3(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) insn;
+
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return 0;
+
+ case PTI_INST_MOV_CR3:
+ return 1;
+ }
+}
+
+int pt_insn_is_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) iext;
+
+ if (!insn)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_call:
+ case ptic_return:
+ case ptic_jump:
+ case ptic_cond_jump:
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ return 1;
+ }
+}
+
+int pt_insn_is_far_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) iext;
+
+ if (!insn)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ return 1;
+ }
+}
+
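+/* Check whether a paging (PIP) event may bind to @insn/@iext.
+ *
+ * Returns non-zero if it may, zero if it may not.
+ */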
+int pt_insn_binds_to_pip(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return pt_insn_is_far_branch(insn, iext);
+
+ case PTI_INST_MOV_CR3:
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ return 1;
+ }
+}
+
+int pt_insn_binds_to_vmcs(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return pt_insn_is_far_branch(insn, iext);
+
+ case PTI_INST_VMPTRLD:
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ return 1;
+ }
+}
+
+int pt_insn_is_ptwrite(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) iext;
+
+ if (!insn)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_ptwrite:
+ return 1;
+ }
+}
+
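+/* Determine the IP of the instruction following @insn/@iext without
+ * consulting the trace and provide it in @pip unless @pip is NULL.
+ *
+ * Returns zero on success, -pte_bad_query if the successor IP depends on the
+ * trace, a negative pt_error_code otherwise.
+ */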
+int pt_insn_next_ip(uint64_t *pip, const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ uint64_t ip;
+
+ if (!insn || !iext)
+ return -pte_internal;
+
+ ip = insn->ip + insn->size;
+
+ switch (insn->iclass) {
+ case ptic_ptwrite:
+ case ptic_other:
+ break;
+
+ case ptic_call:
+ case ptic_jump:
+ if (iext->variant.branch.is_direct) {
+ ip += iext->variant.branch.displacement;
+ break;
+ }
+
+ fallthrough;
+ default:
+ return -pte_bad_query;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+
+ if (pip)
+ *pip = ip;
+
+ return 0;
+}
+
+/* Retry decoding an instruction after a preceding decode error.
+ *
+ * Instruction length decode typically fails due to 'not enough
+ * bytes'.
+ *
+ * This may be caused by partial updates of text sections
+ * represented via new image sections overlapping the original
+ * text section's image section. We stop reading memory at the
+ * end of the section so we do not read the full instruction if
+ * parts of it have been overwritten by the update.
+ *
+ * Try to read the remaining bytes and decode the instruction again. If we
+ * succeed, set @insn->truncated to indicate that the instruction is truncated
+ * in @insn->isid.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_insn if the instruction could not be decoded.
+ */
+static int pt_insn_decode_retry(struct pt_insn *insn, struct pt_insn_ext *iext,
+ struct pt_image *image,
+ const struct pt_asid *asid)
+{
+ int size, errcode, isid;
+ uint8_t isize, remaining;
+
+ if (!insn)
+ return -pte_internal;
+
+ isize = insn->size;
+ remaining = sizeof(insn->raw) - isize;
+
+ /* We failed for real if we already read the maximum number of bytes for
+ * an instruction.
+ */
+ if (!remaining)
+ return -pte_bad_insn;
+
+ /* Read the remaining bytes from the image. */
+ size = pt_image_read(image, &isid, &insn->raw[isize], remaining, asid,
+ insn->ip + isize);
+ if (size <= 0) {
+ /* We should have gotten an error if we were not able to read at
+ * least one byte. Check this to guarantee termination.
+ */
+ if (!size)
+ return -pte_internal;
+
+ /* Preserve the original error if there are no more bytes. */
+ if (size == -pte_nomap)
+ size = -pte_bad_insn;
+
+ return size;
+ }
+
+ /* Add the newly read bytes to the instruction's size. */
+ insn->size += (uint8_t) size;
+
+ /* Store the new size to avoid infinite recursion in case instruction
+ * decode fails after length decode, which would set @insn->size to the
+ * actual length.
+ */
+ size = insn->size;
+
+ /* Try to decode the instruction again.
+ *
+ * If we fail again, we recursively retry again until we either fail to
+ * read more bytes or reach the maximum number of bytes for an
+ * instruction.
+ */
+ errcode = pt_ild_decode(insn, iext);
+ if (errcode < 0) {
+ if (errcode != -pte_bad_insn)
+ return errcode;
+
+ /* If instruction length decode already determined the size,
+ * there's no point in reading more bytes.
+ */
+ if (insn->size != (uint8_t) size)
+ return errcode;
+
+ return pt_insn_decode_retry(insn, iext, image, asid);
+ }
+
+ /* We succeeded this time, so the instruction crosses image section
+ * boundaries.
+ *
+ * This poses the question which isid to use for the instruction.
+ *
+ * To reconstruct exactly this instruction at a later time, we'd need to
+ * store all isids involved together with the number of bytes read for
+ * each isid. Since @insn already provides the exact bytes for this
+ * instruction, we assume that the isid will be used solely for source
+ * correlation. In this case, it should refer to the first byte of the
+ * instruction - as it already does.
+ */
+ insn->truncated = 1;
+
+ return errcode;
+}
+
+int pt_insn_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+ struct pt_image *image, const struct pt_asid *asid)
+{
+ int size, errcode;
+
+ if (!insn)
+ return -pte_internal;
+
+ /* Read the memory at the current IP in the current address space. */
+ size = pt_image_read(image, &insn->isid, insn->raw, sizeof(insn->raw),
+ asid, insn->ip);
+ if (size < 0)
+ return size;
+
+ /* We initialize @insn->size to the maximal possible size. It will be
+ * set to the actual size during instruction decode.
+ */
+ insn->size = (uint8_t) size;
+
+ errcode = pt_ild_decode(insn, iext);
+ if (errcode < 0) {
+ if (errcode != -pte_bad_insn)
+ return errcode;
+
+ /* If instruction length decode already determined the size,
+ * there's no point in reading more bytes.
+ */
+ if (insn->size != (uint8_t) size)
+ return errcode;
+
+ return pt_insn_decode_retry(insn, iext, image, asid);
+ }
+
+ return errcode;
+}
+
+int pt_insn_range_is_contiguous(uint64_t begin, uint64_t end,
+ enum pt_exec_mode mode, struct pt_image *image,
+ const struct pt_asid *asid, size_t steps)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = mode;
+ insn.ip = begin;
+
+ while (insn.ip != end) {
+ int errcode;
+
+ if (!steps--)
+ return 0;
+
+ errcode = pt_insn_decode(&insn, &iext, image, asid);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_insn_next_ip(&insn.ip, &insn, &iext);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 1;
+}
diff --git a/libipt/src/pt_insn_decoder.c b/libipt/src/pt_insn_decoder.c
new file mode 100644
index 000000000000..0cf8740ee8dc
--- /dev/null
+++ b/libipt/src/pt_insn_decoder.c
@@ -0,0 +1,1765 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_insn_decoder.h"
+#include "pt_insn.h"
+#include "pt_config.h"
+#include "pt_asid.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+static int pt_insn_check_ip_event(struct pt_insn_decoder *,
+ const struct pt_insn *,
+ const struct pt_insn_ext *);
+
+
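+/* Reset the decoder's per-synchronization state. */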
+static void pt_insn_reset(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ decoder->mode = ptem_unknown;
+ decoder->ip = 0ull;
+ decoder->status = 0;
+ decoder->enabled = 0;
+ decoder->process_event = 0;
+ decoder->speculative = 0;
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ pt_retstack_init(&decoder->retstack);
+ pt_asid_init(&decoder->asid);
+}
+
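+/* Combine the decoder's stored status with the caller-provided @flags.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative
+ * pt_error_code otherwise.
+ */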
+static int pt_insn_status(const struct pt_insn_decoder *decoder, int flags)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = decoder->status;
+
+ /* Indicate whether tracing is disabled or enabled.
+ *
+ * This duplicates the indication in struct pt_insn and covers the case
+ * where we indicate the status after synchronizing.
+ */
+ if (!decoder->enabled)
+ flags |= pts_ip_suppressed;
+
+ /* Forward end-of-trace indications.
+ *
+ * Postpone it as long as we're still processing events, though.
+ */
+ if ((status & pts_eos) && !decoder->process_event)
+ flags |= pts_eos;
+
+ return flags;
+}
+
+/* Initialize the query decoder flags based on our flags. */
+static int pt_insn_init_qry_flags(struct pt_conf_flags *qflags,
+ const struct pt_conf_flags *flags)
+{
+ if (!qflags || !flags)
+ return -pte_internal;
+
+ memset(qflags, 0, sizeof(*qflags));
+
+ return 0;
+}
+
+int pt_insn_decoder_init(struct pt_insn_decoder *decoder,
+ const struct pt_config *uconfig)
+{
+ struct pt_config config;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_config_from_user(&config, uconfig);
+ if (errcode < 0)
+ return errcode;
+
+ /* The user supplied decoder flags. */
+ decoder->flags = config.flags;
+
+ /* Set the flags we need for the query decoder we use. */
+ errcode = pt_insn_init_qry_flags(&config.flags, &decoder->flags);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_decoder_init(&decoder->query, &config);
+ if (errcode < 0)
+ return errcode;
+
+ pt_image_init(&decoder->default_image, NULL);
+ decoder->image = &decoder->default_image;
+
+ errcode = pt_msec_cache_init(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ pt_insn_reset(decoder);
+
+ return 0;
+}
+
+void pt_insn_decoder_fini(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_msec_cache_fini(&decoder->scache);
+ pt_image_fini(&decoder->default_image);
+ pt_qry_decoder_fini(&decoder->query);
+}
+
+struct pt_insn_decoder *pt_insn_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_insn_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_insn_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_insn_free_decoder(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_insn_decoder_fini(decoder);
+ free(decoder);
+}
+
+/* Maybe synthesize a tick event.
+ *
+ * If we're not already processing events, check the current time against the
+ * last event's time. If it changed, synthesize a tick event with the new time.
+ *
+ * Returns zero if no tick event has been created.
+ * Returns a positive integer if a tick event has been created.
+ * Returns a negative error code otherwise.
+ */
+static int pt_insn_tick(struct pt_insn_decoder *decoder, uint64_t ip)
+{
+ struct pt_event *ev;
+ uint64_t tsc;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* We're not generating tick events if tracing is disabled. */
+ if (!decoder->enabled)
+ return -pte_internal;
+
+ /* Events already provide a timestamp so there is no need to synthesize
+ * an artificial tick event. There's no room, either, since this would
+ * overwrite the in-progress event.
+ *
+ * In rare cases where we need to proceed to an event location using
+ * trace this may cause us to miss a timing update if the event is not
+ * forwarded to the user.
+ *
+ * The only case I can come up with at the moment is a MODE.EXEC binding
+ * to the TIP IP of a far branch.
+ */
+ if (decoder->process_event)
+ return 0;
+
+ errcode = pt_qry_time(&decoder->query, &tsc, &lost_mtc, &lost_cyc);
+ if (errcode < 0) {
+ /* If we don't have wall-clock time, we use relative time. */
+ if (errcode != -pte_no_time)
+ return errcode;
+ }
+
+ ev = &decoder->event;
+
+ /* We're done if time has not changed since the last event. */
+ if (tsc == ev->tsc)
+ return 0;
+
+ /* Time has changed so we create a new tick event. */
+ memset(ev, 0, sizeof(*ev));
+ ev->type = ptev_tick;
+ ev->variant.tick.ip = ip;
+
+ /* Indicate if we have wall-clock time or only relative time. */
+ if (errcode != -pte_no_time)
+ ev->has_tsc = 1;
+ ev->tsc = tsc;
+ ev->lost_mtc = lost_mtc;
+ ev->lost_cyc = lost_cyc;
+
+ /* We now have an event to process. */
+ decoder->process_event = 1;
+
+ return 1;
+}
+
+/* Query an indirect branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_insn_indirect_branch(struct pt_insn_decoder *decoder,
+ uint64_t *ip)
+{
+ uint64_t evip;
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ evip = decoder->ip;
+
+ status = pt_qry_indirect_branch(&decoder->query, ip);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.insn.enable_tick_events) {
+ errcode = pt_insn_tick(decoder, evip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+/* Query a conditional branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_insn_cond_branch(struct pt_insn_decoder *decoder, int *taken)
+{
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_cond_branch(&decoder->query, taken);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.insn.enable_tick_events) {
+ errcode = pt_insn_tick(decoder, decoder->ip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
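+/* Start decoding at the current synchronization point.
+ *
+ * Takes the @status returned by the query decoder's synchronization and
+ * processes any events pending at the initial IP.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative
+ * pt_error_code otherwise.
+ */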
+static int pt_insn_start(struct pt_insn_decoder *decoder, int status)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+
+ if (!(status & pts_ip_suppressed))
+ decoder->enabled = 1;
+
+ /* Process any initial events.
+ *
+ * Some events are processed after proceeding to the next IP in order to
+ * indicate things like tracing disable or trace stop in the preceding
+ * instruction. Those events will be processed without such an
+ * indication before decoding the current instruction.
+ *
+ * We do this already here so we can indicate user-events that precede
+ * the first instruction.
+ */
+ return pt_insn_check_ip_event(decoder, NULL, NULL);
+}
+
+int pt_insn_sync_forward(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pt_insn_reset(decoder);
+
+ status = pt_qry_sync_forward(&decoder->query, &decoder->ip);
+
+ return pt_insn_start(decoder, status);
+}
+
+int pt_insn_sync_backward(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pt_insn_reset(decoder);
+
+ status = pt_qry_sync_backward(&decoder->query, &decoder->ip);
+
+ return pt_insn_start(decoder, status);
+}
+
+int pt_insn_sync_set(struct pt_insn_decoder *decoder, uint64_t offset)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pt_insn_reset(decoder);
+
+ status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset);
+
+ return pt_insn_start(decoder, status);
+}
+
+int pt_insn_get_offset(const struct pt_insn_decoder *decoder, uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_offset(&decoder->query, offset);
+}
+
+int pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_sync_offset(&decoder->query, offset);
+}
+
+struct pt_image *pt_insn_get_image(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return decoder->image;
+}
+
+int pt_insn_set_image(struct pt_insn_decoder *decoder,
+ struct pt_image *image)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ if (!image)
+ image = &decoder->default_image;
+
+ decoder->image = image;
+ return 0;
+}
+
+const struct pt_config *
+pt_insn_get_config(const struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return pt_qry_get_config(&decoder->query);
+}
+
+int pt_insn_time(struct pt_insn_decoder *decoder, uint64_t *time,
+ uint32_t *lost_mtc, uint32_t *lost_cyc)
+{
+ if (!decoder || !time)
+ return -pte_invalid;
+
+ return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc);
+}
+
+int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder, uint32_t *cbr)
+{
+ if (!decoder || !cbr)
+ return -pte_invalid;
+
+ return pt_qry_core_bus_ratio(&decoder->query, cbr);
+}
+
+int pt_insn_asid(const struct pt_insn_decoder *decoder, struct pt_asid *asid,
+ size_t size)
+{
+ if (!decoder || !asid)
+ return -pte_invalid;
+
+ return pt_asid_to_user(asid, &decoder->asid, size);
+}
+
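+/* Check whether there is an event to process.
+ *
+ * Fetches the next event from the query decoder unless we already have one
+ * pending.
+ *
+ * Returns a positive integer if an event is pending, zero if not, a negative
+ * pt_error_code otherwise.
+ */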
+static inline int event_pending(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ if (decoder->process_event)
+ return 1;
+
+ status = decoder->status;
+ if (!(status & pts_event_pending))
+ return 0;
+
+ status = pt_qry_event(&decoder->query, &decoder->event,
+ sizeof(decoder->event));
+ if (status < 0)
+ return status;
+
+ decoder->process_event = 1;
+ decoder->status = status;
+ return 1;
+}
+
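+/* Check whether erratum SKD022 may apply at the current decode position.
+ *
+ * Decode the instruction at @decoder->ip and check whether it is a VMLAUNCH
+ * or VMRESUME instruction.
+ *
+ * Returns a positive integer if it is, zero if it is not or if the
+ * instruction cannot be decoded, a negative pt_error_code otherwise.
+ */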
+static int check_erratum_skd022(struct pt_insn_decoder *decoder)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ insn.mode = decoder->mode;
+ insn.ip = decoder->ip;
+
+ errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (errcode < 0)
+ return 0;
+
+ switch (iext.iclass) {
+ default:
+ return 0;
+
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ return 1;
+ }
+}
+
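+/* Try to work around erratum SKD022.
+ *
+ * If the erratum applies, turn the pending asynchronous disabled event into a
+ * synchronous disabled event at the reported IP.
+ *
+ * Returns a positive integer if the erratum was handled.
+ * Returns zero if the erratum does not apply.
+ * Returns a negative error code otherwise.
+ */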
+static inline int handle_erratum_skd022(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+ uint64_t ip;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = check_erratum_skd022(decoder);
+ if (errcode <= 0)
+ return errcode;
+
+ /* We turn the async disable into a sync disable. It will be processed
+ * after decoding the instruction.
+ */
+ ev = &decoder->event;
+
+ ip = ev->variant.async_disabled.ip;
+
+ ev->type = ptev_disabled;
+ ev->variant.disabled.ip = ip;
+
+ return 1;
+}
+
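+/* Proceed past @insn/@iext and set @decoder->ip to the next instruction.
+ *
+ * Queries the trace for conditional and indirect branches as needed and
+ * maintains the return-compression stack for calls and returns.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */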
+static int pt_insn_proceed(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* Branch displacements apply to the next instruction. */
+ decoder->ip += insn->size;
+
+ /* We handle non-branches, non-taken conditional branches, and
+ * compressed returns directly in the switch and do some pre-work for
+ * calls.
+ *
+ * All kinds of branches are handled below the switch.
+ */
+ switch (insn->iclass) {
+ case ptic_ptwrite:
+ case ptic_other:
+ return 0;
+
+ case ptic_cond_jump: {
+ int status, taken;
+
+ status = pt_insn_cond_branch(decoder, &taken);
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+ if (!taken)
+ return 0;
+
+ break;
+ }
+
+ case ptic_call:
+ /* Log the call for return compression.
+ *
+ * Unless this is a call to the next instruction as is used
+ * for position independent code.
+ */
+ if (iext->variant.branch.displacement ||
+ !iext->variant.branch.is_direct)
+ pt_retstack_push(&decoder->retstack, decoder->ip);
+
+ break;
+
+ case ptic_return: {
+ int taken, status;
+
+ /* Check for a compressed return. */
+ status = pt_insn_cond_branch(decoder, &taken);
+ if (status >= 0) {
+ decoder->status = status;
+
+ /* A compressed return is indicated by a taken
+ * conditional branch.
+ */
+ if (!taken)
+ return -pte_bad_retcomp;
+
+ return pt_retstack_pop(&decoder->retstack,
+ &decoder->ip);
+ }
+
+ break;
+ }
+
+ case ptic_jump:
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ break;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+
+ /* Process a direct or indirect branch.
+ *
+ * This combines calls, uncompressed returns, taken conditional jumps,
+ * and all flavors of far transfers.
+ */
+ if (iext->variant.branch.is_direct)
+ decoder->ip += iext->variant.branch.displacement;
+ else {
+ int status;
+
+ status = pt_insn_indirect_branch(decoder, &decoder->ip);
+
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+
+ /* We do need an IP to proceed. */
+ if (status & pts_ip_suppressed)
+ return -pte_noip;
+ }
+
+ return 0;
+}
+
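+/* Check whether a disabled event at @insn/@iext is affected by erratum SKL014.
+ *
+ * Check whether the direct branch target of @insn/@iext lies outside the
+ * configured address filter, in which case the disable IP payload may have
+ * been suppressed.
+ *
+ * Returns a positive integer if the erratum applies, zero if it does not, a
+ * negative pt_error_code otherwise.
+ */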
+static int pt_insn_at_skl014(const struct pt_event *ev,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext,
+ const struct pt_config *config)
+{
+ uint64_t ip;
+ int status;
+
+ if (!ev || !insn || !iext || !config)
+ return -pte_internal;
+
+ if (!ev->ip_suppressed)
+ return 0;
+
+ switch (insn->iclass) {
+ case ptic_call:
+ case ptic_jump:
+ /* The erratum only applies to unconditional direct branches. */
+ if (!iext->variant.branch.is_direct)
+ break;
+
+ /* Check the filter against the branch target. */
+ ip = insn->ip;
+ ip += insn->size;
+ ip += iext->variant.branch.displacement;
+
+ status = pt_filter_addr_check(&config->addr_filter, ip);
+ if (status <= 0) {
+ if (status < 0)
+ return status;
+
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
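+/* Check whether @insn/@iext is the instruction at which to indicate a
+ * synchronous disabled event.
+ *
+ * Returns a positive integer if it is, zero if it is not, a negative
+ * pt_error_code otherwise.
+ */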
+static int pt_insn_at_disabled_event(const struct pt_event *ev,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext,
+ const struct pt_config *config)
+{
+ if (!ev || !insn || !iext || !config)
+ return -pte_internal;
+
+ if (ev->ip_suppressed) {
+ if (pt_insn_is_far_branch(insn, iext) ||
+ pt_insn_changes_cpl(insn, iext) ||
+ pt_insn_changes_cr3(insn, iext))
+ return 1;
+
+ /* If we don't have a filter configuration we assume that no
+ * address filters were used and the erratum does not apply.
+ *
+ * We might otherwise disable tracing too early.
+ */
+ if (config->addr_filter.config.addr_cfg &&
+ config->errata.skl014 &&
+ pt_insn_at_skl014(ev, insn, iext, config))
+ return 1;
+ } else {
+ switch (insn->iclass) {
+ case ptic_ptwrite:
+ case ptic_other:
+ break;
+
+ case ptic_call:
+ case ptic_jump:
+ /* If we got an IP with the disabled event, we may
+ * ignore direct branches that go to a different IP.
+ */
+ if (iext->variant.branch.is_direct) {
+ uint64_t ip;
+
+ ip = insn->ip;
+ ip += insn->size;
+ ip += iext->variant.branch.displacement;
+
+ if (ip != ev->variant.disabled.ip)
+ break;
+ }
+
+ fallthrough;
+ case ptic_return:
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ case ptic_cond_jump:
+ return 1;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+ }
+
+ return 0;
+}
+
+/* Postpone proceeding past @insn/@iext and indicate a pending event.
+ *
+ * There may be further events pending on @insn/@iext. Postpone proceeding past
 * @insn/@iext until we have processed all events that bind to it.
+ *
+ * Returns a non-negative pt_status_flag bit-vector indicating a pending event
+ * on success, a negative pt_error_code otherwise.
+ */
+static int pt_insn_postpone(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ if (!decoder->process_insn) {
+ decoder->process_insn = 1;
+ decoder->insn = *insn;
+ decoder->iext = *iext;
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+}
+
+/* Remove any postponed instruction from @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_insn_clear_postponed(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ return 0;
+}
+
+/* Proceed past a postponed instruction.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_insn_proceed_postponed(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ if (!decoder->process_insn)
+ return -pte_internal;
+
+ /* There's nothing to do if tracing got disabled. */
+ if (!decoder->enabled)
+ return pt_insn_clear_postponed(decoder);
+
+ status = pt_insn_proceed(decoder, &decoder->insn, &decoder->iext);
+ if (status < 0)
+ return status;
+
+ return pt_insn_clear_postponed(decoder);
+}
+
+/* Check for events that bind to instruction.
+ *
+ * Check whether an event is pending that binds to @insn/@iext, and, if that is
+ * the case, proceed past @insn/@iext and indicate the event by setting
+ * pts_event_pending.
+ *
+ * If that is not the case, we return zero. This is what pt_insn_status() would
+ * return since:
+ *
+ * - we suppress pts_eos as long as we're processing events
+ * - we do not set pts_ip_suppressed since tracing must be enabled
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_insn_check_insn_event(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = event_pending(decoder);
+ if (status <= 0)
+ return status;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_enabled:
+ case ptev_overflow:
+ case ptev_async_paging:
+ case ptev_async_vmcs:
+ case ptev_async_disabled:
+ case ptev_async_branch:
+ case ptev_exec_mode:
+ case ptev_tsx:
+ case ptev_stop:
+ case ptev_exstop:
+ case ptev_mwait:
+ case ptev_pwre:
+ case ptev_pwrx:
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ /* We're only interested in events that bind to instructions. */
+ return 0;
+
+ case ptev_disabled:
+ status = pt_insn_at_disabled_event(ev, insn, iext,
+ &decoder->query.config);
+ if (status <= 0)
+ return status;
+
+ /* We're at a synchronous disable event location.
+ *
+ * Let's determine the IP at which we expect tracing to resume.
+ */
+ status = pt_insn_next_ip(&decoder->ip, insn, iext);
+ if (status < 0) {
+ /* We don't know the IP on error. */
+ decoder->ip = 0ull;
+
+ /* For indirect calls, assume that we return to the next
+ * instruction.
+ *
+ * We only check the instruction class, not the
+ * is_direct property, since direct calls would have
+			 * been handled by pt_insn_next_ip() or would have
+ * provoked a different error.
+ */
+ if (status != -pte_bad_query)
+ return status;
+
+ switch (insn->iclass) {
+ case ptic_call:
+ case ptic_far_call:
+ decoder->ip = insn->ip + insn->size;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ break;
+
+ case ptev_paging:
+ /* We bind at most one paging event to an instruction. */
+ if (decoder->bound_paging)
+ return 0;
+
+ if (!pt_insn_binds_to_pip(insn, iext))
+ return 0;
+
+ /* We bound a paging event. Make sure we do not bind further
+ * paging events to this instruction.
+ */
+ decoder->bound_paging = 1;
+
+ return pt_insn_postpone(decoder, insn, iext);
+
+ case ptev_vmcs:
+ /* We bind at most one vmcs event to an instruction. */
+ if (decoder->bound_vmcs)
+ return 0;
+
+ if (!pt_insn_binds_to_vmcs(insn, iext))
+ return 0;
+
+ /* We bound a vmcs event. Make sure we do not bind further vmcs
+ * events to this instruction.
+ */
+ decoder->bound_vmcs = 1;
+
+ return pt_insn_postpone(decoder, insn, iext);
+
+ case ptev_ptwrite:
+ /* We bind at most one ptwrite event to an instruction. */
+ if (decoder->bound_ptwrite)
+ return 0;
+
+ if (ev->ip_suppressed) {
+ if (!pt_insn_is_ptwrite(insn, iext))
+ return 0;
+
+			/* Fill in the event IP. Our users will need it to
+ * make sense of the PTWRITE payload.
+ */
+ ev->variant.ptwrite.ip = decoder->ip;
+ ev->ip_suppressed = 0;
+ } else {
+ /* The ptwrite event contains the IP of the ptwrite
+ * instruction (CLIP) unlike most events that contain
+ * the IP of the first instruction that did not complete
+ * (NLIP).
+ *
+ * It's easier to handle this case here, as well.
+ */
+ if (decoder->ip != ev->variant.ptwrite.ip)
+ return 0;
+ }
+
+ /* We bound a ptwrite event. Make sure we do not bind further
+ * ptwrite events to this instruction.
+ */
+ decoder->bound_ptwrite = 1;
+
+ return pt_insn_postpone(decoder, insn, iext);
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+}
+
+enum {
+ /* The maximum number of steps to take when determining whether the
+ * event location can be reached.
+ */
+ bdm64_max_steps = 0x100
+};
+
+/* Try to work around erratum BDM64.
+ *
+ * If we got a transaction abort immediately following a branch that produced
+ * trace, the trace for that branch might have been corrupted.
+ *
+ * Returns a positive integer if the erratum was handled.
+ * Returns zero if the erratum does not seem to apply.
+ * Returns a negative error code otherwise.
+ */
+static int handle_erratum_bdm64(struct pt_insn_decoder *decoder,
+ const struct pt_event *ev,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ int status;
+
+ if (!decoder || !ev || !insn || !iext)
+ return -pte_internal;
+
+ /* This only affects aborts. */
+ if (!ev->variant.tsx.aborted)
+ return 0;
+
+ /* This only affects branches. */
+ if (!pt_insn_is_branch(insn, iext))
+ return 0;
+
+ /* Let's check if we can reach the event location from here.
+ *
+ * If we can, let's assume the erratum did not hit. We might still be
+ * wrong but we're not able to tell.
+ */
+ status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip,
+ decoder->mode, decoder->image,
+ &decoder->asid, bdm64_max_steps);
+ if (status > 0)
+ return 0;
+
+ /* We can't reach the event location. This could either mean that we
+ * stopped too early (and status is zero) or that the erratum hit.
+ *
+ * We assume the latter and pretend that the previous branch brought us
+ * to the event location, instead.
+ */
+ decoder->ip = ev->variant.tsx.ip;
+
+ return 1;
+}
+
+/* Check whether a peek TSX event should be postponed.
+ *
+ * This involves handling erratum BDM64.
+ *
+ * Returns a positive integer if the event is to be postponed.
+ * Returns zero if the event should be processed.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_insn_postpone_tsx(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ if (ev->ip_suppressed)
+ return 0;
+
+ if (insn && iext && decoder->query.config.errata.bdm64) {
+ status = handle_erratum_bdm64(decoder, ev, insn, iext);
+ if (status < 0)
+ return status;
+ }
+
+ if (decoder->ip != ev->variant.tsx.ip)
+ return 1;
+
+ return 0;
+}
+
+/* Check for events that bind to an IP.
+ *
+ * Check whether an event is pending that binds to @decoder->ip, and, if that is
+ * the case, indicate the event by setting pts_event_pending.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_insn_check_ip_event(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = event_pending(decoder);
+ if (status <= 0) {
+ if (status < 0)
+ return status;
+
+ return pt_insn_status(decoder, 0);
+ }
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_disabled:
+ break;
+
+ case ptev_enabled:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_disabled:
+ if (ev->variant.async_disabled.at != decoder->ip)
+ break;
+
+ if (decoder->query.config.errata.skd022) {
+ int errcode;
+
+ errcode = handle_erratum_skd022(decoder);
+ if (errcode != 0) {
+ if (errcode < 0)
+ return errcode;
+
+ /* If the erratum applies, we postpone the
+ * modified event to the next call to
+ * pt_insn_next().
+ */
+ break;
+ }
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_tsx:
+ status = pt_insn_postpone_tsx(decoder, insn, iext, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ break;
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_branch:
+ if (ev->variant.async_branch.from != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_overflow:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_exec_mode:
+ if (!ev->ip_suppressed &&
+ ev->variant.exec_mode.ip != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_paging:
+ if (decoder->enabled)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_paging:
+ if (!ev->ip_suppressed &&
+ ev->variant.async_paging.ip != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_vmcs:
+ if (decoder->enabled)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_vmcs:
+ if (!ev->ip_suppressed &&
+ ev->variant.async_vmcs.ip != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_stop:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_exstop:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_mwait:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_ptwrite:
+ /* Any event binding to the current PTWRITE instruction is
+ * handled in pt_insn_check_insn_event().
+ *
+ * Any subsequent ptwrite event binds to a different instruction
+ * and must wait until the next iteration - as long as tracing
+ * is enabled.
+ *
+ * When tracing is disabled, we forward all ptwrite events
+ * immediately to the user.
+ */
+ if (decoder->enabled)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ return pt_insn_status(decoder, pts_event_pending);
+ }
+
+ return pt_insn_status(decoder, 0);
+}
+
+static inline int insn_to_user(struct pt_insn *uinsn, size_t size,
+ const struct pt_insn *insn)
+{
+ if (!uinsn || !insn)
+ return -pte_internal;
+
+ if (uinsn == insn)
+ return 0;
+
+ /* Zero out any unknown bytes. */
+ if (sizeof(*insn) < size) {
+ memset((uint8_t *) uinsn + sizeof(*insn), 0, size - sizeof(*insn));
+
+ size = sizeof(*insn);
+ }
+
+ memcpy(uinsn, insn, size);
+
+ return 0;
+}
+
+static int pt_insn_decode_cached(struct pt_insn_decoder *decoder,
+ const struct pt_mapped_section *msec,
+ struct pt_insn *insn, struct pt_insn_ext *iext)
+{
+ int status;
+
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* Try reading the memory containing @insn from the cached section. If
+ * that fails, if we don't have a cached section, or if decode fails
+ * later on, fall back to decoding @insn from @decoder->image.
+ *
+ * The latter will also handle truncated instructions that cross section
+ * boundaries.
+ */
+
+ if (!msec)
+ return pt_insn_decode(insn, iext, decoder->image,
+ &decoder->asid);
+
+ status = pt_msec_read(msec, insn->raw, sizeof(insn->raw), insn->ip);
+ if (status < 0) {
+ if (status != -pte_nomap)
+ return status;
+
+ return pt_insn_decode(insn, iext, decoder->image,
+ &decoder->asid);
+ }
+
+ /* We initialize @insn->size to the maximal possible size. It will be
+ * set to the actual size during instruction decode.
+ */
+ insn->size = (uint8_t) status;
+
+ status = pt_ild_decode(insn, iext);
+ if (status < 0) {
+ if (status != -pte_bad_insn)
+ return status;
+
+ return pt_insn_decode(insn, iext, decoder->image,
+ &decoder->asid);
+ }
+
+ return status;
+}
+
+static int pt_insn_msec_lookup(struct pt_insn_decoder *decoder,
+ const struct pt_mapped_section **pmsec)
+{
+ struct pt_msec_cache *scache;
+ struct pt_image *image;
+ uint64_t ip;
+ int isid;
+
+ if (!decoder || !pmsec)
+ return -pte_internal;
+
+ scache = &decoder->scache;
+ image = decoder->image;
+ ip = decoder->ip;
+
+ isid = pt_msec_cache_read(scache, pmsec, image, ip);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ return pt_msec_cache_fill(scache, pmsec, image,
+ &decoder->asid, ip);
+ }
+
+ return isid;
+}
+
+int pt_insn_next(struct pt_insn_decoder *decoder, struct pt_insn *uinsn,
+ size_t size)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_insn_ext iext;
+ struct pt_insn insn, *pinsn;
+ int status, isid;
+
+ if (!uinsn || !decoder)
+ return -pte_invalid;
+
+ /* Tracing must be enabled.
+ *
+ * If it isn't, we should be processing events until we either run out of
+ * trace or process a tracing enabled event.
+ */
+ if (!decoder->enabled) {
+ if (decoder->status & pts_eos)
+ return -pte_eos;
+
+ return -pte_no_enable;
+ }
+
+ pinsn = size == sizeof(insn) ? uinsn : &insn;
+
+ /* Zero-initialize the instruction in case of error returns. */
+ memset(pinsn, 0, sizeof(*pinsn));
+
+ /* Fill in a few things from the current decode state.
+ *
+ * This reflects the state of the last pt_insn_next(), pt_insn_event()
+ * or pt_insn_start() call.
+ */
+ if (decoder->speculative)
+ pinsn->speculative = 1;
+ pinsn->ip = decoder->ip;
+ pinsn->mode = decoder->mode;
+
+ isid = pt_insn_msec_lookup(decoder, &msec);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ msec = NULL;
+ }
+
+ /* We set an incorrect isid if @msec is NULL. This will be corrected
+ * when we read the memory from the image later on.
+ */
+ pinsn->isid = isid;
+
+ status = pt_insn_decode_cached(decoder, msec, pinsn, &iext);
+ if (status < 0) {
+ /* Provide the incomplete instruction - the IP and mode fields
+ * are valid and may help diagnose the error.
+ */
+ (void) insn_to_user(uinsn, size, pinsn);
+ return status;
+ }
+
+ /* Provide the decoded instruction to the user. It won't change during
+ * event processing.
+ */
+ status = insn_to_user(uinsn, size, pinsn);
+ if (status < 0)
+ return status;
+
+ /* Check for events that bind to the current instruction.
+ *
+ * If an event is indicated, we're done.
+ */
+ status = pt_insn_check_insn_event(decoder, pinsn, &iext);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ if (status & pts_event_pending)
+ return status;
+ }
+
+ /* Determine the next instruction's IP. */
+ status = pt_insn_proceed(decoder, pinsn, &iext);
+ if (status < 0)
+ return status;
+
+ /* Indicate events that bind to the new IP.
+ *
+ * Although we only look at the IP for binding events, we pass the
+ * decoded instruction in order to handle errata.
+ */
+ return pt_insn_check_ip_event(decoder, pinsn, &iext);
+}
+
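+/* A minimal caller loop (illustrative sketch only; synchronization, image
+ * setup, and error handling are omitted, and @decoder names an already
+ * synchronized instruction-flow decoder):
+ *
+ *   for (;;) {
+ *           struct pt_insn insn;
+ *           int status;
+ *
+ *           status = pt_insn_next(decoder, &insn, sizeof(insn));
+ *
+ *           while (status >= 0 && (status & pts_event_pending)) {
+ *                   struct pt_event event;
+ *
+ *                   status = pt_insn_event(decoder, &event, sizeof(event));
+ *           }
+ *
+ *           if (status < 0)
+ *                   break;
+ *
+ *           [use insn.ip, insn.raw, insn.size, ...]
+ *   }
+ */
+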
+static int pt_insn_process_enabled(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must have an IP in order to start decoding. */
+ if (ev->ip_suppressed)
+ return -pte_noip;
+
+ /* We must currently be disabled. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->ip = ev->variant.enabled.ip;
+ decoder->enabled = 1;
+
+ return 0;
+}
+
+static int pt_insn_process_disabled(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must currently be enabled. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ /* We preserve @decoder->ip. This is where we expect tracing to resume
+ * and we'll indicate that on the subsequent enabled event if tracing
+ * actually does resume from there.
+ */
+ decoder->enabled = 0;
+
+ return 0;
+}
+
+static int pt_insn_process_async_branch(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* Tracing must be enabled in order to make sense of the event. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->ip = ev->variant.async_branch.to;
+
+ return 0;
+}
+
+static int pt_insn_process_paging(struct pt_insn_decoder *decoder)
+{
+ uint64_t cr3;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ cr3 = decoder->event.variant.paging.cr3;
+ if (decoder->asid.cr3 != cr3) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.cr3 = cr3;
+ }
+
+ return 0;
+}
+
+static int pt_insn_process_overflow(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* If the IP is suppressed, the overflow resolved while tracing was
+ * disabled. Otherwise it resolved while tracing was enabled.
+ */
+ if (ev->ip_suppressed) {
+ /* Tracing is disabled.
+ *
+ * It doesn't make sense to preserve the previous IP; it would
+ * only be misleading. Even if tracing had already been
+ * disabled before, we might have missed the re-enable in the
+ * overflow.
+ */
+ decoder->enabled = 0;
+ decoder->ip = 0ull;
+ } else {
+ /* Tracing is enabled and we're at the IP at which the overflow
+ * resolved.
+ */
+ decoder->ip = ev->variant.overflow.ip;
+ decoder->enabled = 1;
+ }
+
+ /* We don't know the TSX state. Let's assume we execute normally.
+ *
+ * We also don't know the execution mode. Let's keep what we have
+ * in case we don't get an update before we have to decode the next
+ * instruction.
+ */
+ decoder->speculative = 0;
+
+ return 0;
+}
+
+static int pt_insn_process_exec_mode(struct pt_insn_decoder *decoder)
+{
+ enum pt_exec_mode mode;
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+ mode = ev->variant.exec_mode.mode;
+
+ /* Use status update events to diagnose inconsistencies. */
+ if (ev->status_update && decoder->enabled &&
+ decoder->mode != ptem_unknown && decoder->mode != mode)
+ return -pte_bad_status_update;
+
+ decoder->mode = mode;
+
+ return 0;
+}
+
+static int pt_insn_process_tsx(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->speculative = decoder->event.variant.tsx.speculative;
+
+ return 0;
+}
+
+static int pt_insn_process_stop(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* Tracing is always disabled before it is stopped. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ return 0;
+}
+
+static int pt_insn_process_vmcs(struct pt_insn_decoder *decoder)
+{
+ uint64_t vmcs;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ vmcs = decoder->event.variant.vmcs.base;
+ if (decoder->asid.vmcs != vmcs) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.vmcs = vmcs;
+ }
+
+ return 0;
+}
+
+int pt_insn_event(struct pt_insn_decoder *decoder, struct pt_event *uevent,
+ size_t size)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder || !uevent)
+ return -pte_invalid;
+
+ /* We must currently process an event. */
+ if (!decoder->process_event)
+ return -pte_bad_query;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ default:
+ /* This is not a user event.
+ *
+ * We either indicated it wrongly or the user called
+ * pt_insn_event() without a pts_event_pending indication.
+ */
+ return -pte_bad_query;
+
+ case ptev_enabled:
+ /* Indicate that tracing resumes from the IP at which tracing
+ * had been disabled before (with some special treatment for
+ * calls).
+ */
+ if (decoder->ip == ev->variant.enabled.ip)
+ ev->variant.enabled.resumed = 1;
+
+ status = pt_insn_process_enabled(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_disabled:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_disabled.at)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_disabled:
+ status = pt_insn_process_disabled(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_branch:
+ if (decoder->ip != ev->variant.async_branch.from)
+ return -pte_bad_query;
+
+ status = pt_insn_process_async_branch(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_paging:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_paging.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_paging:
+ status = pt_insn_process_paging(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_vmcs:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_vmcs.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_vmcs:
+ status = pt_insn_process_vmcs(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_overflow:
+ status = pt_insn_process_overflow(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exec_mode:
+ status = pt_insn_process_exec_mode(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_tsx:
+ status = pt_insn_process_tsx(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_stop:
+ status = pt_insn_process_stop(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exstop:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ return -pte_bad_query;
+
+ break;
+
+ case ptev_mwait:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ return -pte_bad_query;
+
+ break;
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ case ptev_ptwrite:
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ break;
+ }
+
+ /* Copy the event to the user. Make sure we're not writing beyond the
+ * memory provided by the user.
+ *
+ * We might truncate details of an event but only for those events the
+ * user can't know about, anyway.
+ */
+ if (sizeof(*ev) < size)
+ size = sizeof(*ev);
+
+ memcpy(uevent, ev, size);
+
+ /* This completes processing of the current event. */
+ decoder->process_event = 0;
+
+ /* If we just handled an instruction event, check for further events
+ * that bind to this instruction.
+ *
+ * If we don't have further events, proceed beyond the instruction so we
+ * can check for IP events, as well.
+ */
+ if (decoder->process_insn) {
+ status = pt_insn_check_insn_event(decoder, &decoder->insn,
+ &decoder->iext);
+
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ if (status & pts_event_pending)
+ return status;
+ }
+
+ /* Proceed to the next instruction. */
+ status = pt_insn_proceed_postponed(decoder);
+ if (status < 0)
+ return status;
+ }
+
+ /* Indicate further events that bind to the same IP. */
+ return pt_insn_check_ip_event(decoder, NULL, NULL);
+}
diff --git a/libipt/src/pt_last_ip.c b/libipt/src/pt_last_ip.c
new file mode 100644
index 000000000000..3c98c9c3c95a
--- /dev/null
+++ b/libipt/src/pt_last_ip.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_last_ip.h"
+
+#include "intel-pt.h"
+
+
+void pt_last_ip_init(struct pt_last_ip *last_ip)
+{
+ if (!last_ip)
+ return;
+
+ last_ip->ip = 0ull;
+ last_ip->have_ip = 0;
+ last_ip->suppressed = 0;
+}
+
+int pt_last_ip_query(uint64_t *ip, const struct pt_last_ip *last_ip)
+{
+ if (!last_ip)
+ return -pte_internal;
+
+ if (!last_ip->have_ip) {
+ if (ip)
+ *ip = 0ull;
+ return -pte_noip;
+ }
+
+ if (last_ip->suppressed) {
+ if (ip)
+ *ip = 0ull;
+ return -pte_ip_suppressed;
+ }
+
+ if (ip)
+ *ip = last_ip->ip;
+
+ return 0;
+}
+
+/* Sign-extend a uint64_t value. */
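+/* For example (illustrative values): sext(0x800000000000ull, 48) yields
+ * 0xffff800000000000ull, while sext(0x7ff000000000ull, 48) is returned
+ * unchanged.
+ */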
+static uint64_t sext(uint64_t val, uint8_t sign)
+{
+ uint64_t signbit, mask;
+
+ signbit = 1ull << (sign - 1);
+ mask = ~0ull << sign;
+
+ return val & signbit ? val | mask : val & ~mask;
+}
+
+int pt_last_ip_update_ip(struct pt_last_ip *last_ip,
+ const struct pt_packet_ip *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!last_ip || !packet)
+ return -pte_internal;
+
+ switch (packet->ipc) {
+ case pt_ipc_suppressed:
+ last_ip->suppressed = 1;
+ return 0;
+
+ case pt_ipc_sext_48:
+ last_ip->ip = sext(packet->ip, 48);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_update_16:
+ last_ip->ip = (last_ip->ip & ~0xffffull)
+ | (packet->ip & 0xffffull);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_update_32:
+ last_ip->ip = (last_ip->ip & ~0xffffffffull)
+ | (packet->ip & 0xffffffffull);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_update_48:
+ last_ip->ip = (last_ip->ip & ~0xffffffffffffull)
+ | (packet->ip & 0xffffffffffffull);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_full:
+ last_ip->ip = packet->ip;
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+ }
+
+ return -pte_bad_packet;
+}
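+
+/* An illustrative update sequence (addresses invented for the example):
+ * starting from a last IP of 0xffff800000401000, a pt_ipc_update_16 packet
+ * carrying 0x2040 yields 0xffff800000402040; a subsequent pt_ipc_sext_48
+ * packet carrying 0x800000500000 yields 0xffff800000500000.
+ */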
diff --git a/libipt/src/pt_msec_cache.c b/libipt/src/pt_msec_cache.c
new file mode 100644
index 000000000000..da946e0552d2
--- /dev/null
+++ b/libipt/src/pt_msec_cache.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_msec_cache.h"
+#include "pt_section.h"
+#include "pt_image.h"
+
+#include <string.h>
+
+
+int pt_msec_cache_init(struct pt_msec_cache *cache)
+{
+ if (!cache)
+ return -pte_internal;
+
+ memset(cache, 0, sizeof(*cache));
+
+ return 0;
+}
+
+void pt_msec_cache_fini(struct pt_msec_cache *cache)
+{
+ if (!cache)
+ return;
+
+ (void) pt_msec_cache_invalidate(cache);
+ pt_msec_fini(&cache->msec);
+}
+
+int pt_msec_cache_invalidate(struct pt_msec_cache *cache)
+{
+ struct pt_section *section;
+ int errcode;
+
+ if (!cache)
+ return -pte_internal;
+
+ section = pt_msec_section(&cache->msec);
+ if (!section)
+ return 0;
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+
+ cache->msec.section = NULL;
+
+ return pt_section_put(section);
+}
+
+int pt_msec_cache_read(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image, uint64_t vaddr)
+{
+ struct pt_mapped_section *msec;
+ int isid, errcode;
+
+ if (!cache || !pmsec)
+ return -pte_internal;
+
+ msec = &cache->msec;
+ isid = cache->isid;
+
+ errcode = pt_image_validate(image, msec, vaddr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ *pmsec = msec;
+
+ return isid;
+}
+
+int pt_msec_cache_fill(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image, const struct pt_asid *asid,
+ uint64_t vaddr)
+{
+ struct pt_mapped_section *msec;
+ struct pt_section *section;
+ int errcode, isid;
+
+ if (!cache || !pmsec)
+ return -pte_internal;
+
+ errcode = pt_msec_cache_invalidate(cache);
+ if (errcode < 0)
+ return errcode;
+
+ msec = &cache->msec;
+
+ isid = pt_image_find(image, msec, asid, vaddr);
+ if (isid < 0)
+ return isid;
+
+ section = pt_msec_section(msec);
+
+ errcode = pt_section_map(section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ msec->section = NULL;
+
+ return errcode;
+ }
+
+ *pmsec = msec;
+
+ cache->isid = isid;
+
+ return isid;
+}
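+
+/* The intended call pattern is the one used by pt_insn_msec_lookup()
+ * (illustrative, with variable names abbreviated): first try the cached
+ * section, then refill the cache on a miss.
+ *
+ *   isid = pt_msec_cache_read(cache, &msec, image, ip);
+ *   if (isid == -pte_nomap)
+ *           isid = pt_msec_cache_fill(cache, &msec, image, asid, ip);
+ */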
diff --git a/libipt/src/pt_packet.c b/libipt/src/pt_packet.c
new file mode 100644
index 000000000000..e237427ec127
--- /dev/null
+++ b/libipt/src/pt_packet.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_packet.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <limits.h>
+
+
+static uint64_t pt_pkt_read_value(const uint8_t *pos, int size)
+{
+ uint64_t val;
+ int idx;
+
+ for (val = 0, idx = 0; idx < size; ++idx) {
+ uint64_t byte = *pos++;
+
+ byte <<= (idx * 8);
+ val |= byte;
+ }
+
+ return val;
+}
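+
+/* Values are read in little-endian byte order; for example (illustrative
+ * bytes), pt_pkt_read_value() applied to { 0x78, 0x56, 0x34, 0x12 } with a
+ * size of four yields 0x12345678.
+ */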
+
+int pt_pkt_read_unknown(struct pt_packet *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ int (*decode)(struct pt_packet_unknown *, const struct pt_config *,
+ const uint8_t *, void *);
+ int size;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ decode = config->decode.callback;
+ if (!decode)
+ return -pte_bad_opc;
+
+ /* Fill in some default values. */
+ packet->payload.unknown.packet = pos;
+ packet->payload.unknown.priv = NULL;
+
+ /* We accept a size of zero to allow the callback to modify the
+ * trace buffer and resume normal decoding.
+ */
+ size = (*decode)(&packet->payload.unknown, config, pos,
+ config->decode.context);
+ if (size < 0)
+ return size;
+
+ if (size > UCHAR_MAX)
+ return -pte_invalid;
+
+ packet->type = ppt_unknown;
+ packet->size = (uint8_t) size;
+
+ if (config->end < pos + size)
+ return -pte_eos;
+
+ return size;
+}
+
+int pt_pkt_read_psb(const uint8_t *pos, const struct pt_config *config)
+{
+ int count;
+
+ if (!pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_psb)
+ return -pte_eos;
+
+ pos += pt_opcs_psb;
+
+ for (count = 0; count < pt_psb_repeat_count; ++count) {
+ if (*pos++ != pt_psb_hi)
+ return -pte_bad_packet;
+ if (*pos++ != pt_psb_lo)
+ return -pte_bad_packet;
+ }
+
+ return ptps_psb;
+}
+
+static int pt_pkt_ip_size(enum pt_ip_compression ipc)
+{
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ return 0;
+
+ case pt_ipc_update_16:
+ return 2;
+
+ case pt_ipc_update_32:
+ return 4;
+
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ return 6;
+
+ case pt_ipc_full:
+ return 8;
+ }
+
+ return -pte_bad_packet;
+}
+
+int pt_pkt_read_ip(struct pt_packet_ip *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t ip;
+ uint8_t ipc;
+ int ipsize;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ ipc = (*pos++ >> pt_opm_ipc_shr) & pt_opm_ipc_shr_mask;
+
+ ip = 0ull;
+ ipsize = pt_pkt_ip_size((enum pt_ip_compression) ipc);
+ if (ipsize < 0)
+ return ipsize;
+
+ if (config->end < pos + ipsize)
+ return -pte_eos;
+
+ if (ipsize)
+ ip = pt_pkt_read_value(pos, ipsize);
+
+ packet->ipc = (enum pt_ip_compression) ipc;
+ packet->ip = ip;
+
+ return ipsize + 1;
+}
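+
+/* The returned size covers the opcode byte plus the compressed payload.  For
+ * example, an IP packet using pt_ipc_update_16 compression carries two
+ * payload bytes, so pt_pkt_read_ip() returns 3; pt_ipc_full carries eight
+ * and yields 9.
+ */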
+
+static uint8_t pt_pkt_tnt_bit_size(uint64_t payload)
+{
+ uint8_t size;
+
+ /* The payload bit-size is the bit-index of the payload's stop-bit,
+ * which itself is not part of the payload proper.
+ */
+ for (size = 0; ; size += 1) {
+ payload >>= 1;
+ if (!payload)
+ break;
+ }
+
+ return size;
+}
+
+static int pt_pkt_read_tnt(struct pt_packet_tnt *packet, uint64_t payload)
+{
+ uint8_t bit_size;
+
+ if (!packet)
+ return -pte_internal;
+
+ bit_size = pt_pkt_tnt_bit_size(payload);
+ if (!bit_size)
+ return -pte_bad_packet;
+
+ /* Remove the stop bit from the payload. */
+ payload &= ~(1ull << bit_size);
+
+ packet->payload = payload;
+ packet->bit_size = bit_size;
+
+ return 0;
+}
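+
+/* For example (illustrative payload): a raw TNT payload of 0x2d (binary
+ * 101101) has its stop bit at bit 5, so pt_pkt_tnt_bit_size() returns 5 and
+ * pt_pkt_read_tnt() stores a 5-bit payload of 0x0d (binary 01101).
+ */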
+
+int pt_pkt_read_tnt_8(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ (void) config;
+
+ if (!pos)
+ return -pte_internal;
+
+ errcode = pt_pkt_read_tnt(packet, pos[0] >> pt_opm_tnt_8_shr);
+ if (errcode < 0)
+ return errcode;
+
+ return ptps_tnt_8;
+}
+
+int pt_pkt_read_tnt_64(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+ int errcode;
+
+ if (!pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_tnt_64)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_tnt_64, pt_pl_tnt_64_size);
+
+ errcode = pt_pkt_read_tnt(packet, payload);
+ if (errcode < 0)
+ return errcode;
+
+ return ptps_tnt_64;
+}
+
+int pt_pkt_read_pip(struct pt_packet_pip *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_pip)
+ return -pte_eos;
+
+ /* Read the payload. */
+ payload = pt_pkt_read_value(pos + pt_opcs_pip, pt_pl_pip_size);
+
+ /* Extract the non-root information from the payload. */
+ packet->nr = payload & pt_pl_pip_nr;
+
+ /* Create the cr3 value. */
+ payload >>= pt_pl_pip_shr;
+ payload <<= pt_pl_pip_shl;
+ packet->cr3 = payload;
+
+ return ptps_pip;
+}
+
+static int pt_pkt_read_mode_exec(struct pt_packet_mode_exec *packet,
+ uint8_t mode)
+{
+ if (!packet)
+ return -pte_internal;
+
+ packet->csl = (mode & pt_mob_exec_csl) != 0;
+ packet->csd = (mode & pt_mob_exec_csd) != 0;
+
+ return ptps_mode;
+}
+
+static int pt_pkt_read_mode_tsx(struct pt_packet_mode_tsx *packet,
+ uint8_t mode)
+{
+ if (!packet)
+ return -pte_internal;
+
+ packet->intx = (mode & pt_mob_tsx_intx) != 0;
+ packet->abrt = (mode & pt_mob_tsx_abrt) != 0;
+
+ return ptps_mode;
+}
+
+int pt_pkt_read_mode(struct pt_packet_mode *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint8_t payload, mode, leaf;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mode)
+ return -pte_eos;
+
+ payload = pos[pt_opcs_mode];
+ leaf = payload & pt_mom_leaf;
+ mode = payload & pt_mom_bits;
+
+ packet->leaf = (enum pt_mode_leaf) leaf;
+ switch (leaf) {
+ default:
+ return -pte_bad_packet;
+
+ case pt_mol_exec:
+ return pt_pkt_read_mode_exec(&packet->bits.exec, mode);
+
+ case pt_mol_tsx:
+ return pt_pkt_read_mode_tsx(&packet->bits.tsx, mode);
+ }
+}
+
+int pt_pkt_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_tsc)
+ return -pte_eos;
+
+ packet->tsc = pt_pkt_read_value(pos + pt_opcs_tsc, pt_pl_tsc_size);
+
+ return ptps_tsc;
+}
+
+int pt_pkt_read_cbr(struct pt_packet_cbr *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_cbr)
+ return -pte_eos;
+
+ packet->ratio = pos[2];
+
+ return ptps_cbr;
+}
+
+int pt_pkt_read_tma(struct pt_packet_tma *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint16_t ctc, fc;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_tma)
+ return -pte_eos;
+
+ ctc = pos[pt_pl_tma_ctc_0];
+ ctc |= pos[pt_pl_tma_ctc_1] << 8;
+
+ fc = pos[pt_pl_tma_fc_0];
+ fc |= pos[pt_pl_tma_fc_1] << 8;
+
+ if (fc & ~pt_pl_tma_fc_mask)
+ return -pte_bad_packet;
+
+ packet->ctc = ctc;
+ packet->fc = fc;
+
+ return ptps_tma;
+}
+
+int pt_pkt_read_mtc(struct pt_packet_mtc *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mtc)
+ return -pte_eos;
+
+ packet->ctc = pos[pt_opcs_mtc];
+
+ return ptps_mtc;
+}
+
+int pt_pkt_read_cyc(struct pt_packet_cyc *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ uint64_t value;
+ uint8_t cyc, ext, shl;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ begin = pos;
+ end = config->end;
+
+ /* The first byte contains the opcode and part of the payload.
+ * We already checked that this first byte is within bounds.
+ */
+ cyc = *pos++;
+
+ ext = cyc & pt_opm_cyc_ext;
+ cyc >>= pt_opm_cyc_shr;
+
+ value = cyc;
+ shl = (8 - pt_opm_cyc_shr);
+
+ while (ext) {
+ uint64_t bits;
+
+ if (end <= pos)
+ return -pte_eos;
+
+ bits = *pos++;
+ ext = bits & pt_opm_cycx_ext;
+
+ bits >>= pt_opm_cycx_shr;
+ bits <<= shl;
+
+ shl += (8 - pt_opm_cycx_shr);
+ if (sizeof(value) * 8 < shl)
+ return -pte_bad_packet;
+
+ value |= bits;
+ }
+
+ packet->value = value;
+
+ return (int) (pos - begin);
+}
+
+int pt_pkt_read_vmcs(struct pt_packet_vmcs *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_vmcs)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_vmcs, pt_pl_vmcs_size);
+
+ packet->base = payload << pt_pl_vmcs_shl;
+
+ return ptps_vmcs;
+}
+
+int pt_pkt_read_mnt(struct pt_packet_mnt *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mnt)
+ return -pte_eos;
+
+ packet->payload = pt_pkt_read_value(pos + pt_opcs_mnt, pt_pl_mnt_size);
+
+ return ptps_mnt;
+}
+
+int pt_pkt_read_exstop(struct pt_packet_exstop *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_exstop)
+ return -pte_eos;
+
+ packet->ip = pos[1] & pt_pl_exstop_ip_mask ? 1 : 0;
+
+ return ptps_exstop;
+}
+
+int pt_pkt_read_mwait(struct pt_packet_mwait *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mwait)
+ return -pte_eos;
+
+ packet->hints = (uint32_t) pt_pkt_read_value(pos + pt_opcs_mwait,
+ pt_pl_mwait_hints_size);
+ packet->ext = (uint32_t) pt_pkt_read_value(pos + pt_opcs_mwait +
+ pt_pl_mwait_hints_size,
+ pt_pl_mwait_ext_size);
+ return ptps_mwait;
+}
+
+int pt_pkt_read_pwre(struct pt_packet_pwre *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_pwre)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_pwre, pt_pl_pwre_size);
+
+ memset(packet, 0, sizeof(*packet));
+ packet->state = (uint8_t) ((payload & pt_pl_pwre_state_mask) >>
+ pt_pl_pwre_state_shr);
+ packet->sub_state = (uint8_t) ((payload & pt_pl_pwre_sub_state_mask) >>
+ pt_pl_pwre_sub_state_shr);
+ if (payload & pt_pl_pwre_hw_mask)
+ packet->hw = 1;
+
+ return ptps_pwre;
+}
+
+int pt_pkt_read_pwrx(struct pt_packet_pwrx *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_pwrx)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_pwrx, pt_pl_pwrx_size);
+
+ memset(packet, 0, sizeof(*packet));
+ packet->last = (uint8_t) ((payload & pt_pl_pwrx_last_mask) >>
+ pt_pl_pwrx_last_shr);
+ packet->deepest = (uint8_t) ((payload & pt_pl_pwrx_deepest_mask) >>
+ pt_pl_pwrx_deepest_shr);
+ if (payload & pt_pl_pwrx_wr_int)
+ packet->interrupt = 1;
+ if (payload & pt_pl_pwrx_wr_store)
+ packet->store = 1;
+ if (payload & pt_pl_pwrx_wr_hw)
+ packet->autonomous = 1;
+
+ return ptps_pwrx;
+}
+
+int pt_pkt_read_ptw(struct pt_packet_ptw *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint8_t opc, plc;
+ int size;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ /* Skip the ext opcode. */
+ pos++;
+
+ opc = *pos++;
+ plc = (opc >> pt_opm_ptw_pb_shr) & pt_opm_ptw_pb_shr_mask;
+
+ size = pt_ptw_size(plc);
+ if (size < 0)
+ return size;
+
+ if (config->end < pos + size)
+ return -pte_eos;
+
+ packet->payload = pt_pkt_read_value(pos, size);
+ packet->plc = plc;
+ packet->ip = opc & pt_opm_ptw_ip ? 1 : 0;
+
+ return pt_opcs_ptw + size;
+}
diff --git a/libipt/src/pt_packet_decoder.c b/libipt/src/pt_packet_decoder.c
new file mode 100644
index 000000000000..bf1a1bd0843e
--- /dev/null
+++ b/libipt/src/pt_packet_decoder.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_packet_decoder.h"
+#include "pt_decoder_function.h"
+#include "pt_packet.h"
+#include "pt_sync.h"
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+int pt_pkt_decoder_init(struct pt_packet_decoder *decoder,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!decoder || !config)
+ return -pte_invalid;
+
+ memset(decoder, 0, sizeof(*decoder));
+
+ errcode = pt_config_from_user(&decoder->config, config);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
+struct pt_packet_decoder *pt_pkt_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_packet_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_pkt_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_pkt_decoder_fini(struct pt_packet_decoder *decoder)
+{
+ (void) decoder;
+
+ /* Nothing to do. */
+}
+
+void pt_pkt_free_decoder(struct pt_packet_decoder *decoder)
+{
+ pt_pkt_decoder_fini(decoder);
+ free(decoder);
+}
+
+int pt_pkt_sync_forward(struct pt_packet_decoder *decoder)
+{
+ const uint8_t *pos, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ sync = decoder->sync;
+ pos = decoder->pos;
+ if (!pos)
+ pos = decoder->config.begin;
+
+ if (pos == sync)
+ pos += ptps_psb;
+
+ errcode = pt_sync_forward(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->sync = sync;
+ decoder->pos = sync;
+
+ return 0;
+}
+
+int pt_pkt_sync_backward(struct pt_packet_decoder *decoder)
+{
+ const uint8_t *pos, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pos = decoder->pos;
+ if (!pos)
+ pos = decoder->config.end;
+
+ errcode = pt_sync_backward(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->sync = sync;
+ decoder->pos = sync;
+
+ return 0;
+}
+
+int pt_pkt_sync_set(struct pt_packet_decoder *decoder, uint64_t offset)
+{
+ const uint8_t *begin, *end, *pos;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ end = decoder->config.end;
+ pos = begin + offset;
+
+ if (end < pos || pos < begin)
+ return -pte_eos;
+
+ decoder->sync = pos;
+ decoder->pos = pos;
+
+ return 0;
+}
+
+int pt_pkt_get_offset(const struct pt_packet_decoder *decoder, uint64_t *offset)
+{
+ const uint8_t *begin, *pos;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ pos = decoder->pos;
+
+ if (!pos)
+ return -pte_nosync;
+
+ *offset = pos - begin;
+ return 0;
+}
+
+int pt_pkt_get_sync_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset)
+{
+ const uint8_t *begin, *sync;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ sync = decoder->sync;
+
+ if (!sync)
+ return -pte_nosync;
+
+ *offset = sync - begin;
+ return 0;
+}
+
+const struct pt_config *
+pt_pkt_get_config(const struct pt_packet_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return &decoder->config;
+}
+
+static inline int pkt_to_user(struct pt_packet *upkt, size_t size,
+ const struct pt_packet *pkt)
+{
+ if (!upkt || !pkt)
+ return -pte_internal;
+
+ if (upkt == pkt)
+ return 0;
+
+ /* Zero out any unknown bytes. */
+ if (sizeof(*pkt) < size) {
+ memset((uint8_t *) upkt + sizeof(*pkt), 0, size - sizeof(*pkt));
+
+ size = sizeof(*pkt);
+ }
+
+ memcpy(upkt, pkt, size);
+
+ return 0;
+}
+
+int pt_pkt_next(struct pt_packet_decoder *decoder, struct pt_packet *packet,
+ size_t psize)
+{
+ const struct pt_decoder_function *dfun;
+ struct pt_packet pkt, *ppkt;
+ int errcode, size;
+
+ if (!packet || !decoder)
+ return -pte_invalid;
+
+ ppkt = psize == sizeof(pkt) ? packet : &pkt;
+
+ errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ if (!dfun)
+ return -pte_internal;
+
+ if (!dfun->packet)
+ return -pte_internal;
+
+ size = dfun->packet(decoder, ppkt);
+ if (size < 0)
+ return size;
+
+ errcode = pkt_to_user(packet, psize, ppkt);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+
+ return size;
+}
+
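+/* A minimal caller loop (illustrative sketch only; @config is assumed to be
+ * a filled-in pt_config describing the trace buffer, and error handling is
+ * abbreviated):
+ *
+ *   struct pt_packet_decoder *decoder;
+ *   struct pt_packet packet;
+ *   int status;
+ *
+ *   decoder = pt_pkt_alloc_decoder(&config);
+ *   if (!decoder)
+ *           return;
+ *
+ *   status = pt_pkt_sync_forward(decoder);
+ *   while (status >= 0) {
+ *           status = pt_pkt_next(decoder, &packet, sizeof(packet));
+ *           if (status < 0)
+ *                   break;
+ *
+ *           [inspect packet.type and packet.payload]
+ *   }
+ *
+ *   pt_pkt_free_decoder(decoder);
+ */
+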
+int pt_pkt_decode_unknown(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_unknown(packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ return size;
+}
+
+int pt_pkt_decode_pad(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_pad;
+ packet->size = ptps_pad;
+
+ return ptps_pad;
+}
+
+int pt_pkt_decode_psb(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_psb(decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_psb;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tip(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tip;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tnt_8(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_8(&packet->payload.tnt, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tnt_8;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tnt_64(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_64(&packet->payload.tnt, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tnt_64;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tip_pge(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tip_pge;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tip_pgd;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_fup(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_fup;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_pip(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_pip(&packet->payload.pip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_pip;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_ovf(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_ovf;
+ packet->size = ptps_ovf;
+
+ return ptps_ovf;
+}
+
+int pt_pkt_decode_mode(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mode(&packet->payload.mode, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mode;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_psbend(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_psbend;
+ packet->size = ptps_psbend;
+
+ return ptps_psbend;
+}
+
+int pt_pkt_decode_tsc(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tsc(&packet->payload.tsc, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tsc;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_cbr(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_cbr(&packet->payload.cbr, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_cbr;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tma(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tma(&packet->payload.tma, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tma;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_mtc(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mtc(&packet->payload.mtc, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mtc;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_cyc(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_cyc(&packet->payload.cyc, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_cyc;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_stop(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_stop;
+ packet->size = ptps_stop;
+
+ return ptps_stop;
+}
+
+int pt_pkt_decode_vmcs(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_vmcs(&packet->payload.vmcs, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_vmcs;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_mnt(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mnt(&packet->payload.mnt, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mnt;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_exstop(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_exstop(&packet->payload.exstop, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_exstop;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_mwait(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mwait(&packet->payload.mwait, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mwait;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_pwre(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwre(&packet->payload.pwre, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_pwre;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_pwrx(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwrx(&packet->payload.pwrx, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_pwrx;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_ptw(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ptw(&packet->payload.ptw, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_ptw;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
diff --git a/libipt/src/pt_query_decoder.c b/libipt/src/pt_query_decoder.c
new file mode 100644
index 000000000000..1941ae4d2f47
--- /dev/null
+++ b/libipt/src/pt_query_decoder.c
@@ -0,0 +1,3630 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_query_decoder.h"
+#include "pt_sync.h"
+#include "pt_decoder_function.h"
+#include "pt_packet.h"
+#include "pt_packet_decoder.h"
+#include "pt_config.h"
+#include "pt_opcodes.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+
+
+/* Find a FUP in a PSB+ header.
+ *
+ * The packet @decoder must be synchronized onto the trace stream at the
+ * beginning or somewhere inside a PSB+ header.
+ *
+ * It uses @packet to hold trace packets during its search. If the search is
+ * successful, @packet will contain the first (and hopefully only) FUP packet in
+ * this PSB+. Otherwise, @packet may contain anything.
+ *
+ * Returns one if a FUP packet is found (@packet will contain it).
+ * Returns zero if no FUP packet is found (@packet is undefined).
+ * Returns a negative error code otherwise.
+ */
+static int pt_qry_find_header_fup(struct pt_packet *packet,
+ struct pt_packet_decoder *decoder)
+{
+ if (!packet || !decoder)
+ return -pte_internal;
+
+ for (;;) {
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, packet, sizeof(*packet));
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet->type) {
+ default:
+ /* Ignore the packet. */
+ break;
+
+ case ppt_psbend:
+ /* There's no FUP in here. */
+ return 0;
+
+ case ppt_fup:
+ /* Found it. */
+ return 1;
+ }
+ }
+}
+
+int pt_qry_decoder_init(struct pt_query_decoder *decoder,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ memset(decoder, 0, sizeof(*decoder));
+
+ errcode = pt_config_from_user(&decoder->config, config);
+ if (errcode < 0)
+ return errcode;
+
+ pt_last_ip_init(&decoder->ip);
+ pt_tnt_cache_init(&decoder->tnt);
+ pt_time_init(&decoder->time);
+ pt_time_init(&decoder->last_time);
+ pt_tcal_init(&decoder->tcal);
+ pt_evq_init(&decoder->evq);
+
+ return 0;
+}
+
+struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_query_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_qry_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_qry_decoder_fini(struct pt_query_decoder *decoder)
+{
+ (void) decoder;
+
+ /* Nothing to do. */
+}
+
+void pt_qry_free_decoder(struct pt_query_decoder *decoder)
+{
+ pt_qry_decoder_fini(decoder);
+ free(decoder);
+}
+
+static void pt_qry_reset(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ decoder->enabled = 0;
+ decoder->consume_packet = 0;
+ decoder->event = NULL;
+
+ pt_last_ip_init(&decoder->ip);
+ pt_tnt_cache_init(&decoder->tnt);
+ pt_time_init(&decoder->time);
+ pt_time_init(&decoder->last_time);
+ pt_tcal_init(&decoder->tcal);
+ pt_evq_init(&decoder->evq);
+}
+
+static int pt_qry_will_event(const struct pt_query_decoder *decoder)
+{
+ const struct pt_decoder_function *dfun;
+
+ if (!decoder)
+ return -pte_internal;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return 0;
+
+ if (dfun->flags & pdff_event)
+ return 1;
+
+ if (dfun->flags & pdff_psbend)
+ return pt_evq_pending(&decoder->evq, evb_psbend);
+
+ if (dfun->flags & pdff_tip)
+ return pt_evq_pending(&decoder->evq, evb_tip);
+
+ if (dfun->flags & pdff_fup)
+ return pt_evq_pending(&decoder->evq, evb_fup);
+
+ return 0;
+}
+
+static int pt_qry_will_eos(const struct pt_query_decoder *decoder)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ dfun = decoder->next;
+ if (dfun)
+ return 0;
+
+ /* The decoding function may be NULL for two reasons:
+ *
+ * - we ran out of trace
+ * - we ran into a fetch error such as -pte_bad_opc
+ *
+ * Let's fetch again.
+ */
+ errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ return errcode == -pte_eos;
+}
+
+static int pt_qry_status_flags(const struct pt_query_decoder *decoder)
+{
+ int flags = 0;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Some packets force out TNT and any deferred TIPs in order to
+ * establish the correct context for the subsequent packet.
+ *
+ * Users are expected to first navigate to the correct code region
+ * by using up the cached TNT bits before interpreting any subsequent
+ * packets.
+ *
+ * We do need to read ahead in order to signal upcoming events. We may
+ * have already decoded those packets while our user has not navigated
+ * to the correct code region, yet.
+ *
+ * In order to have our user use up the cached TNT bits first, we do
+ * not indicate the next event until the TNT cache is empty.
+ */
+ if (pt_tnt_cache_is_empty(&decoder->tnt)) {
+ if (pt_qry_will_event(decoder))
+ flags |= pts_event_pending;
+
+ if (pt_qry_will_eos(decoder))
+ flags |= pts_eos;
+ }
+
+ return flags;
+}
+
+static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Repeat the decoder fetch to reproduce the error. */
+ errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* We must get some error or something's wrong. */
+ return -pte_internal;
+}
+
+static int pt_qry_read_ahead(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(&decoder->next, decoder->pos,
+ &decoder->config);
+ if (errcode)
+ return errcode;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return -pte_internal;
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* We're done once we reach
+ *
+ * - a branching related packet. */
+ if (dfun->flags & (pdff_tip | pdff_tnt))
+ return 0;
+
+ /* - an event related packet. */
+ if (pt_qry_will_event(decoder))
+ return 0;
+
+ /* Decode status update packets. */
+ errcode = dfun->decode(decoder);
+ if (errcode) {
+ /* Ignore truncated status packets at the end.
+ *
+ * Move beyond the packet and clear @decoder->next to
+ * indicate that we were not able to fetch the next
+ * packet.
+ */
+ if (errcode == -pte_eos) {
+ decoder->pos = decoder->config.end;
+ decoder->next = NULL;
+ }
+
+ return errcode;
+ }
+ }
+}
+
+static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos,
+ uint64_t *addr)
+{
+ const struct pt_decoder_function *dfun;
+ int status, errcode;
+
+ if (!decoder || !pos)
+ return -pte_invalid;
+
+ pt_qry_reset(decoder);
+
+ decoder->sync = pos;
+ decoder->pos = pos;
+
+ errcode = pt_df_fetch(&decoder->next, pos, &decoder->config);
+ if (errcode)
+ return errcode;
+
+ dfun = decoder->next;
+
+ /* We do need to start at a PSB in order to initialize the state. */
+ if (dfun != &pt_decode_psb)
+ return -pte_nosync;
+
+ /* Decode the PSB+ header to initialize the state. */
+ errcode = dfun->decode(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Fill in the start address.
+ * We do this before reading ahead since the latter may read an
+ * adjacent PSB+ that might change the decoder's IP, causing us
+ * to skip code.
+ */
+ if (addr) {
+ status = pt_last_ip_query(addr, &decoder->ip);
+
+ /* Make sure we don't clobber it later on. */
+ if (!status)
+ addr = NULL;
+ }
+
+ /* Read ahead until the first query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* We return the current decoder status. */
+ status = pt_qry_status_flags(decoder);
+ if (status < 0)
+ return status;
+
+ errcode = pt_last_ip_query(addr, &decoder->ip);
+ if (errcode < 0) {
+ /* Indicate the missing IP in the status. */
+ if (addr)
+ status |= pts_ip_suppressed;
+ }
+
+ return status;
+}
+
+static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_tsc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_tsc(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_header_tsc(struct pt_time *time,
+ struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_header_tsc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_tsc(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_cbr(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_cbr(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_header_cbr(struct pt_time *time,
+ struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_header_cbr(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_cbr(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_tma *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_tma(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_tma(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_mtc *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_mtc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_mtc(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_cyc *packet,
+ const struct pt_config *config)
+{
+ uint64_t fcr;
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * calibration which will result in imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_cyc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We need the FastCounter to Cycles ratio below. Fall back to
+ * an invalid ratio of 0 if calibration has not kicked in, yet.
+ *
+ * This will be tracked as packet loss in struct pt_time.
+ */
+ errcode = pt_tcal_fcr(&fcr, tcal);
+ if (errcode < 0) {
+ if (errcode == -pte_no_time)
+ fcr = 0ull;
+ else
+ return errcode;
+ }
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_cyc(time, packet, config, fcr);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip)
+{
+ const uint8_t *pos, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ sync = decoder->sync;
+ pos = decoder->pos;
+ if (!pos)
+ pos = decoder->config.begin;
+
+ if (pos == sync)
+ pos += ptps_psb;
+
+ errcode = pt_sync_forward(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_qry_start(decoder, sync, ip);
+}
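+
+/* Illustrative sketch (not part of the library): iterating over all PSB
+ * synchronization points in the trace by calling pt_qry_sync_forward()
+ * until it reports -pte_eos.
+ *
+ *	for (;;) {
+ *		status = pt_qry_sync_forward(qry, &ip);
+ *		if (status < 0) {
+ *			if (status == -pte_eos)
+ *				break;
+ *
+ *			return status;
+ *		}
+ *
+ *		(... decode this trace segment ...)
+ *	}
+ */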
+
+int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip)
+{
+ const uint8_t *start, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ start = decoder->pos;
+ if (!start)
+ start = decoder->config.end;
+
+ sync = start;
+ for (;;) {
+ errcode = pt_sync_backward(&sync, sync, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_start(decoder, sync, ip);
+ if (errcode < 0) {
+ /* Ignore incomplete trace segments at the end. We need
+ * a full PSB+ to start decoding.
+ */
+ if (errcode == -pte_eos)
+ continue;
+
+ return errcode;
+ }
+
+ /* An empty trace segment in the middle of the trace might bring
+ * us back to where we started.
+ *
+ * We're done when we reached a new position.
+ */
+ if (decoder->pos != start)
+ break;
+ }
+
+ return 0;
+}
+
+int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip,
+ uint64_t offset)
+{
+ const uint8_t *sync, *pos;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pos = decoder->config.begin + offset;
+
+ errcode = pt_sync_set(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_qry_start(decoder, sync, ip);
+}
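+
+/* Illustrative sketch (not part of the library): resuming at a previously
+ * recorded trace offset, e.g. one obtained via pt_qry_get_sync_offset().
+ *
+ *	status = pt_qry_get_sync_offset(qry, &offset);
+ *	if (status < 0)
+ *		return status;
+ *
+ *	(... later ...)
+ *
+ *	status = pt_qry_sync_set(qry, &ip, offset);
+ *	if (status < 0)
+ *		return status;
+ */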
+
+int pt_qry_get_offset(const struct pt_query_decoder *decoder, uint64_t *offset)
+{
+ const uint8_t *begin, *pos;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ pos = decoder->pos;
+
+ if (!pos)
+ return -pte_nosync;
+
+ *offset = pos - begin;
+ return 0;
+}
+
+int pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset)
+{
+ const uint8_t *begin, *sync;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ sync = decoder->sync;
+
+ if (!sync)
+ return -pte_nosync;
+
+ *offset = sync - begin;
+ return 0;
+}
+
+const struct pt_config *
+pt_qry_get_config(const struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return &decoder->config;
+}
+
+static int pt_qry_cache_tnt(struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return pt_qry_provoke_fetch_error(decoder);
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* There's an event ahead of us. */
+ if (pt_qry_will_event(decoder))
+ return -pte_bad_query;
+
+ /* Diagnose a TIP that has not been part of an event. */
+ if (dfun->flags & pdff_tip)
+ return -pte_bad_query;
+
+ /* Clear the decoder's current event so we know when we
+ * accidentally skipped an event.
+ */
+ decoder->event = NULL;
+
+ /* Apply the decoder function. */
+ errcode = dfun->decode(decoder);
+ if (errcode)
+ return errcode;
+
+ /* If we skipped an event, we're in trouble. */
+ if (decoder->event)
+ return -pte_event_ignored;
+
+ /* We're done when we decoded a TNT packet. */
+ if (dfun->flags & pdff_tnt)
+ break;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode)
+ return errcode;
+ }
+
+ /* Preserve the time at the TNT packet. */
+ decoder->last_time = decoder->time;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return errcode;
+
+ return 0;
+}
+
+int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken)
+{
+ int errcode, query;
+
+ if (!decoder || !taken)
+ return -pte_invalid;
+
+ /* We cache the latest tnt packet in the decoder. Let's re-fill the
+ * cache in case it is empty.
+ */
+ if (pt_tnt_cache_is_empty(&decoder->tnt)) {
+ errcode = pt_qry_cache_tnt(decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ query = pt_tnt_cache_query(&decoder->tnt);
+ if (query < 0)
+ return query;
+
+ *taken = query;
+
+ return pt_qry_status_flags(decoder);
+}
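+
+/* Illustrative sketch (not part of the library): consuming conditional
+ * branch results.  The returned status flags tell the user when to switch
+ * to pt_qry_event().
+ *
+ *	status = pt_qry_cond_branch(qry, &taken);
+ *	if (status < 0)
+ *		return status;
+ *
+ *	(... follow the taken or not-taken path based on @taken ...)
+ *
+ *	if (status & pts_event_pending)
+ *		(... query pending events with pt_qry_event() ...)
+ */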
+
+int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr)
+{
+ int errcode, flags;
+
+ if (!decoder || !addr)
+ return -pte_invalid;
+
+ flags = 0;
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return pt_qry_provoke_fetch_error(decoder);
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* There's an event ahead of us. */
+ if (pt_qry_will_event(decoder))
+ return -pte_bad_query;
+
+ /* Clear the decoder's current event so we know when we
+ * accidentally skipped an event.
+ */
+ decoder->event = NULL;
+
+ /* We may see a single TNT packet if the current tnt is empty.
+ *
+ * If we see a TNT while the current tnt is not empty, it means
+ * that our user got out of sync. Let's report no data and hope
+ * that our user is able to re-sync.
+ */
+ if ((dfun->flags & pdff_tnt) &&
+ !pt_tnt_cache_is_empty(&decoder->tnt))
+ return -pte_bad_query;
+
+ /* Apply the decoder function. */
+ errcode = dfun->decode(decoder);
+ if (errcode)
+ return errcode;
+
+ /* If we skipped an event, we're in trouble. */
+ if (decoder->event)
+ return -pte_event_ignored;
+
+ /* We're done when we found a TIP packet that isn't part of an
+ * event.
+ */
+ if (dfun->flags & pdff_tip) {
+ uint64_t ip;
+
+ /* We already decoded it, so the branch destination
+ * is stored in the decoder's last ip.
+ */
+ errcode = pt_last_ip_query(&ip, &decoder->ip);
+ if (errcode < 0)
+ flags |= pts_ip_suppressed;
+ else
+ *addr = ip;
+
+ break;
+ }
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode)
+ return errcode;
+ }
+
+ /* Preserve the time at the TIP packet. */
+ decoder->last_time = decoder->time;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return errcode;
+
+ flags |= pt_qry_status_flags(decoder);
+
+ return flags;
+}
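+
+/* Illustrative sketch (not part of the library): querying an indirect
+ * branch destination and honoring a suppressed IP.
+ *
+ *	status = pt_qry_indirect_branch(qry, &addr);
+ *	if (status < 0)
+ *		return status;
+ *
+ *	if (status & pts_ip_suppressed)
+ *		(... the destination is not traced; obtain it elsewhere ...)
+ *	else
+ *		(... continue decoding at @addr ...)
+ */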
+
+int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event,
+ size_t size)
+{
+ int errcode, flags;
+
+ if (!decoder || !event)
+ return -pte_invalid;
+
+ if (size < offsetof(struct pt_event, variant))
+ return -pte_invalid;
+
+ /* We do not allow querying for events while there are still TNT
+ * bits to consume.
+ */
+ if (!pt_tnt_cache_is_empty(&decoder->tnt))
+ return -pte_bad_query;
+
+ /* Do not provide more than we actually have. */
+ if (sizeof(*event) < size)
+ size = sizeof(*event);
+
+ flags = 0;
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return pt_qry_provoke_fetch_error(decoder);
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* We must not see a TIP or TNT packet unless it belongs
+ * to an event.
+ *
+ * If we see one, it means that our user got out of sync.
+ * Let's report no data and hope that our user is able
+ * to re-sync.
+ */
+ if ((dfun->flags & (pdff_tip | pdff_tnt)) &&
+ !pt_qry_will_event(decoder))
+ return -pte_bad_query;
+
+ /* Clear the decoder's current event so we know when decoding
+ * produces a new event.
+ */
+ decoder->event = NULL;
+
+ /* Apply any other decoder function. */
+ errcode = dfun->decode(decoder);
+ if (errcode)
+ return errcode;
+
+ /* Check if there has been an event.
+ *
+ * Some packets may result in events in some but not in all
+ * configurations.
+ */
+ if (decoder->event) {
+ (void) memcpy(event, decoder->event, size);
+ break;
+ }
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode)
+ return errcode;
+ }
+
+ /* Preserve the time at the event. */
+ decoder->last_time = decoder->time;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return errcode;
+
+ flags |= pt_qry_status_flags(decoder);
+
+ return flags;
+}
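+
+/* Illustrative sketch (not part of the library): draining pending events.
+ * Here, @status holds the flags returned by the preceding pt_qry_* call.
+ *
+ *	while (status & pts_event_pending) {
+ *		struct pt_event event;
+ *
+ *		status = pt_qry_event(qry, &event, sizeof(event));
+ *		if (status < 0)
+ *			return status;
+ *
+ *		(... process @event based on event.type ...)
+ *	}
+ */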
+
+int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time,
+ uint32_t *lost_mtc, uint32_t *lost_cyc)
+{
+ if (!decoder || !time)
+ return -pte_invalid;
+
+ return pt_time_query_tsc(time, lost_mtc, lost_cyc, &decoder->last_time);
+}
+
+int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr)
+{
+ if (!decoder || !cbr)
+ return -pte_invalid;
+
+ return pt_time_query_cbr(cbr, &decoder->last_time);
+}
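+
+/* Illustrative sketch (not part of the library): querying the estimated TSC
+ * and the number of lost MTC/CYC updates after a branch or event query.
+ *
+ *	errcode = pt_qry_time(qry, &tsc, &lost_mtc, &lost_cyc);
+ *	if (errcode < 0 && errcode != -pte_no_time)
+ *		return errcode;
+ */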
+
+static int pt_qry_event_time(struct pt_event *event,
+ const struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!event || !decoder)
+ return -pte_internal;
+
+ errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc,
+ &event->lost_cyc, &decoder->time);
+ if (errcode < 0) {
+ if (errcode != -pte_no_time)
+ return errcode;
+ } else
+ event->has_tsc = 1;
+
+ return 0;
+}
+
+int pt_qry_decode_unknown(struct pt_query_decoder *decoder)
+{
+ struct pt_packet packet;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_pad(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += ptps_pad;
+
+ return 0;
+}
+
+static int pt_qry_read_psb_header(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ pt_last_ip_init(&decoder->ip);
+
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(&decoder->next, decoder->pos,
+ &decoder->config);
+ if (errcode)
+ return errcode;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return -pte_internal;
+
+ /* We're done once we reach a psbend packet. */
+ if (dfun->flags & pdff_psbend)
+ return 0;
+
+ if (!dfun->header)
+ return -pte_bad_context;
+
+ errcode = dfun->header(decoder);
+ if (errcode)
+ return errcode;
+ }
+}
+
+int pt_qry_decode_psb(struct pt_query_decoder *decoder)
+{
+ const uint8_t *pos;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ pos = decoder->pos;
+
+ size = pt_pkt_read_psb(pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ decoder->pos += size;
+
+ errcode = pt_qry_read_psb_header(decoder);
+ if (errcode < 0) {
+ /* Move back to the PSB so we have a chance to recover and
+ * continue decoding.
+ */
+ decoder->pos = pos;
+
+ /* Clear any PSB+ events that have already been queued. */
+ (void) pt_evq_clear(&decoder->evq, evb_psbend);
+
+ /* Reset the decoder's decode function. */
+ decoder->next = &pt_decode_psb;
+
+ return errcode;
+ }
+
+ /* The next packet following the PSB header will be of type PSBEND.
+ *
+ * Decoding this packet will publish the PSB events that have been
+ * accumulated while reading the PSB header.
+ */
+ return 0;
+}
+
+static int pt_qry_event_ip(uint64_t *ip, struct pt_event *event,
+ const struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_last_ip_query(ip, &decoder->ip);
+ if (errcode < 0) {
+ switch (pt_errcode(errcode)) {
+ case pte_noip:
+ case pte_ip_suppressed:
+ event->ip_suppressed = 1;
+ break;
+
+ default:
+ return errcode;
+ }
+ }
+
+ return 0;
+}
+
+/* Decode a generic IP packet.
+ *
+ * Returns the number of bytes read, on success.
+ * Returns -pte_eos if the ip does not fit into the buffer.
+ * Returns -pte_bad_packet if the ip compression is not known.
+ */
+static int pt_qry_decode_ip(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_ip packet;
+ int errcode, size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* We do not update the decoder's position, yet. */
+
+ return size;
+}
+
+static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_tip(struct pt_event *ev,
+ struct pt_query_decoder *decoder)
+{
+ if (!ev || !decoder)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_async_branch:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.async_branch.to, ev,
+ decoder);
+
+ case ptev_async_paging:
+ return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
+ decoder);
+
+ case ptev_async_vmcs:
+ return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
+ decoder);
+
+ case ptev_exec_mode:
+ return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev,
+ decoder);
+
+ default:
+ break;
+ }
+
+ return -pte_bad_context;
+}
+
+int pt_qry_decode_tip(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* Process any pending events binding to TIP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_tip);
+ if (ev) {
+ errcode = pt_qry_event_tip(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_tip))
+ return 0;
+
+ /* No further events.
+ *
+ * If none of the events consumed the packet, we're done.
+ */
+ if (!decoder->consume_packet)
+ return 0;
+
+ /* We're done with this packet. Clear the flag we set previously
+ * and consume it.
+ */
+ decoder->consume_packet = 0;
+ }
+
+ return pt_qry_consume_tip(decoder, size);
+}
+
+int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tnt packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tnt packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_tip_pge(struct pt_event *ev,
+ const struct pt_query_decoder *decoder)
+{
+ if (!ev)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_exec_mode:
+ return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
+
+ default:
+ break;
+ }
+
+ return -pte_bad_context;
+}
+
+int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* We send the enable event first. This is more convenient for our users
+ * and does not require them to either store or blindly apply other
+ * events that might be pending.
+ *
+ * We use the consume packet decoder flag to indicate this.
+ */
+ if (!decoder->consume_packet) {
+ /* This packet signals a standalone enabled event. */
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_enabled;
+
+ /* We can't afford having a suppressed IP here. */
+ errcode = pt_last_ip_query(&ev->variant.enabled.ip,
+ &decoder->ip);
+ if (errcode < 0)
+ return -pte_bad_packet;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Discard any cached TNT bits.
+ *
+ * They should have been consumed at the corresponding disable
+ * event. If they have not, for whatever reason, discard them
+ * now so our user does not get out of sync.
+ */
+ pt_tnt_cache_init(&decoder->tnt);
+
+ /* Process pending events next. */
+ decoder->consume_packet = 1;
+ decoder->enabled = 1;
+ } else {
+ /* Process any pending events binding to TIP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_tip);
+ if (ev) {
+ errcode = pt_qry_event_tip_pge(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+ }
+
+ /* We must have an event. Either the initial enable event or one of the
+ * queued events.
+ */
+ if (!ev)
+ return -pte_internal;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_tip))
+ return 0;
+
+ /* We must consume the packet. */
+ if (!decoder->consume_packet)
+ return -pte_internal;
+
+ decoder->consume_packet = 0;
+
+ return pt_qry_consume_tip_pge(decoder, size);
+}
+
+static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->enabled = 0;
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_tip_pgd(struct pt_event *ev,
+ const struct pt_query_decoder *decoder)
+{
+ if (!ev)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_async_branch: {
+ uint64_t at;
+
+ /* Turn the async branch into an async disable. */
+ at = ev->variant.async_branch.from;
+
+ ev->type = ptev_async_disabled;
+ ev->variant.async_disabled.at = at;
+
+ return pt_qry_event_ip(&ev->variant.async_disabled.ip, ev,
+ decoder);
+ }
+
+ case ptev_async_paging:
+ case ptev_async_vmcs:
+ case ptev_exec_mode:
+ /* These events are ordered after the async disable event. It
+ * is not quite clear what IP to give them.
+ *
+ * If we give them the async disable's source IP, we'd make an
+ * error if the IP is updated when applying the async disable
+ * event.
+ *
+ * If we give them the async disable's destination IP, we'd make
+ * an error if the IP is not updated when applying the async
+ * disable event. That's what our decoders do since tracing is
+ * likely to resume from there.
+ *
+ * In all cases, tracing will be disabled when those events are
+ * applied, so we may as well suppress the IP.
+ */
+ ev->ip_suppressed = 1;
+
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -pte_bad_context;
+}
+
+int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* Process any pending events binding to TIP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_tip);
+ if (ev) {
+ errcode = pt_qry_event_tip_pgd(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ } else {
+ /* This packet signals a standalone disabled event. */
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+ ev->type = ptev_disabled;
+
+ errcode = pt_qry_event_ip(&ev->variant.disabled.ip, ev,
+ decoder);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ /* We must have an event. Either the initial disable event or one of the
+ * queued events.
+ */
+ if (!ev)
+ return -pte_internal;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_tip))
+ return 0;
+
+ return pt_qry_consume_tip_pgd(decoder, size);
+}
+
+static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder)
+{
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0) {
+ /* Running out of packets is not an error. */
+ if (errcode == -pte_eos)
+ errcode = 0;
+
+ return errcode;
+ }
+
+ switch (packet.type) {
+ default:
+ /* All other packets cancel our search.
+ *
+ * We do not enumerate those packets since we also
+ * want to include new packets.
+ */
+ return 0;
+
+ case ppt_tip_pge:
+ /* We found it - the erratum applies. */
+ return 1;
+
+ case ppt_pad:
+ case ppt_tsc:
+ case ppt_cbr:
+ case ppt_psbend:
+ case ppt_pip:
+ case ppt_mode:
+ case ppt_vmcs:
+ case ppt_tma:
+ case ppt_mtc:
+ case ppt_cyc:
+ case ppt_mnt:
+ /* Intentionally skip a few packets. */
+ continue;
+ }
+ }
+}
+
+static int check_erratum_bdm70(const uint8_t *pos,
+ const struct pt_config *config)
+{
+ struct pt_packet_decoder decoder;
+ int errcode;
+
+ if (!pos || !config)
+ return -pte_internal;
+
+ errcode = pt_pkt_decoder_init(&decoder, config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin));
+ if (errcode >= 0)
+ errcode = scan_for_erratum_bdm70(&decoder);
+
+ pt_pkt_decoder_fini(&decoder);
+ return errcode;
+}
+
+int pt_qry_header_fup(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_ip packet;
+ int errcode, size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ if (decoder->config.errata.bdm70 && !decoder->enabled) {
+ errcode = check_erratum_bdm70(decoder->pos + size,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ if (errcode)
+ return pt_qry_consume_fup(decoder, size);
+ }
+
+ errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* Tracing is enabled if we have an IP in the header. */
+ if (packet.ipc != pt_ipc_suppressed)
+ decoder->enabled = 1;
+
+ return pt_qry_consume_fup(decoder, size);
+}
+
+static int pt_qry_event_fup(struct pt_event *ev,
+ struct pt_query_decoder *decoder)
+{
+ if (!ev || !decoder)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_overflow:
+ decoder->consume_packet = 1;
+
+ /* We can't afford having a suppressed IP here. */
+ return pt_last_ip_query(&ev->variant.overflow.ip,
+ &decoder->ip);
+
+ case ptev_tsx:
+ if (!(ev->variant.tsx.aborted))
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
+
+ case ptev_exstop:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.exstop.ip, ev, decoder);
+
+ case ptev_mwait:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.mwait.ip, ev, decoder);
+
+ case ptev_ptwrite:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.ptwrite.ip, ev, decoder);
+
+ default:
+ break;
+ }
+
+ return -pte_internal;
+}
+
+int pt_qry_decode_fup(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* Process any pending events binding to FUP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_fup);
+ if (ev) {
+ errcode = pt_qry_event_fup(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_fup))
+ return 0;
+
+ /* No further events.
+ *
+ * If none of the events consumed the packet, we're done.
+ */
+ if (!decoder->consume_packet)
+ return 0;
+
+ /* We're done with this packet. Clear the flag we set previously
+ * and consume it.
+ */
+ decoder->consume_packet = 0;
+ } else {
+ /* FUP indicates an async branch event; it binds to TIP.
+ *
+ * We do need an IP in this case.
+ */
+ uint64_t ip;
+
+ errcode = pt_last_ip_query(&ip, &decoder->ip);
+ if (errcode < 0)
+ return errcode;
+
+ ev = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!ev)
+ return -pte_nomem;
+
+ ev->type = ptev_async_branch;
+ ev->variant.async_branch.from = ip;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return pt_qry_consume_fup(decoder, size);
+}
+
+int pt_qry_decode_pip(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pip packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* Paging events are either standalone or bind to the same TIP packet
+ * as an in-flight async branch event.
+ */
+ event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
+ if (!event) {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+ event->type = ptev_paging;
+ event->variant.paging.cr3 = packet.cr3;
+ event->variant.paging.non_root = packet.nr;
+
+ decoder->event = event;
+ } else {
+ event = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_paging;
+ event->variant.async_paging.cr3 = packet.cr3;
+ event->variant.async_paging.non_root = packet.nr;
+ }
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_pip(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pip packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* Paging events are reported at the end of the PSB. */
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_paging;
+ event->variant.async_paging.cr3 = packet.cr3;
+ event->variant.async_paging.non_root = packet.nr;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_psbend(struct pt_event *ev,
+ struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!ev || !decoder)
+ return -pte_internal;
+
+ /* PSB+ events are status updates. */
+ ev->status_update = 1;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ switch (ev->type) {
+ case ptev_async_paging:
+ return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
+ decoder);
+
+ case ptev_exec_mode:
+ return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
+
+ case ptev_tsx:
+ return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
+
+ case ptev_async_vmcs:
+ return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
+ decoder);
+
+ case ptev_cbr:
+ return 0;
+
+ case ptev_mnt:
+ /* Maintenance packets may appear anywhere. Do not mark them as
+ * status updates even if they appear in PSB+.
+ */
+ ev->status_update = 0;
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -pte_internal;
+}
+
+static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = pt_evq_dequeue(&decoder->evq, evb_psbend);
+ if (!ev)
+ return 0;
+
+ errcode = pt_qry_event_psbend(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Signal a pending event. */
+ return 1;
+}
+
+/* Create a standalone overflow event with tracing disabled.
+ *
+ * Creates and publishes the event and disables tracing in @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_qry_event_ovf_disabled(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_overflow;
+
+ /* We suppress the IP to indicate that tracing has been disabled before
+ * the overflow resolved. There can be several events before tracing is
+ * enabled again.
+ */
+ ev->ip_suppressed = 1;
+
+ decoder->enabled = 0;
+ decoder->event = ev;
+
+ return pt_qry_event_time(ev, decoder);
+}
+
+/* Queues an overflow event with tracing enabled.
+ *
+ * Creates and enqueues the event and enables tracing in @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_qry_event_ovf_enabled(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_overflow;
+
+ decoder->enabled = 1;
+
+ return pt_qry_event_time(ev, decoder);
+}
+
+/* Recover from SKD010.
+ *
+ * Creates and publishes an overflow event at @packet's IP payload.
+ *
+ * Further updates @decoder as follows:
+ *
+ * - set time tracking to @time and @tcal
+ * - set the position to @offset
+ * - set ip to @packet's IP payload
+ * - set tracing to be enabled
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int skd010_recover(struct pt_query_decoder *decoder,
+ const struct pt_packet_ip *packet,
+ const struct pt_time_cal *tcal,
+ const struct pt_time *time, uint64_t offset)
+{
+ struct pt_last_ip ip;
+ struct pt_event *ev;
+ int errcode;
+
+ if (!decoder || !packet || !tcal || !time)
+ return -pte_internal;
+
+ /* We use the decoder's IP. It should be newly initialized. */
+ ip = decoder->ip;
+
+ /* Extract the IP payload from the packet. */
+ errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* Synthesize the overflow event. */
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_overflow;
+
+ /* We do need a full IP. */
+ errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip);
+ if (errcode < 0)
+ return -pte_bad_context;
+
+ /* We continue decoding at the given offset. */
+ decoder->pos = decoder->config.begin + offset;
+
+ /* Tracing is enabled. */
+ decoder->enabled = 1;
+ decoder->ip = ip;
+
+ decoder->time = *time;
+ decoder->tcal = *tcal;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ return pt_qry_event_time(ev, decoder);
+}
+
+/* Recover from SKD010 with tracing disabled.
+ *
+ * Creates and publishes a standalone overflow event.
+ *
+ * Further updates @decoder as follows:
+ *
+ * - set time tracking to @time and @tcal
+ * - set the position to @offset
+ * - set tracing to be disabled
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int skd010_recover_disabled(struct pt_query_decoder *decoder,
+ const struct pt_time_cal *tcal,
+ const struct pt_time *time, uint64_t offset)
+{
+ if (!decoder || !tcal || !time)
+ return -pte_internal;
+
+ decoder->time = *time;
+ decoder->tcal = *tcal;
+
+ /* We continue decoding at the given offset. */
+ decoder->pos = decoder->config.begin + offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+}
+
+/* Scan ahead for a packet at which to resume after an overflow.
+ *
+ * This function is called after an OVF without a corresponding FUP. This
+ * normally means that the overflow resolved while tracing was disabled.
+ *
+ * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped.
+ * The overflow thus resolved while tracing was enabled (or tracing was enabled
+ * after the overflow resolved). Search for an indication whether tracing is
+ * enabled or disabled by scanning upcoming packets.
+ *
+ * If we can confirm that tracing is disabled, the erratum does not apply and we
+ * can continue normally.
+ *
+ * If we can confirm that tracing is enabled, the erratum applies and we try to
+ * recover by synchronizing at a later packet and a different IP. If we can't
+ * recover, pretend the erratum didn't apply so we run into the error later.
+ * Since this assumes that tracing is disabled, no harm should be done, i.e. no
+ * bad trace should be generated.
+ *
+ * Returns zero if the overflow is handled.
+ * Returns a positive value if the overflow is not yet handled.
+ * Returns a negative error code otherwise.
+ */
+static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt,
+ struct pt_query_decoder *decoder)
+{
+ struct pt_time_cal tcal;
+ struct pt_time time;
+ struct {
+ struct pt_time_cal tcal;
+ struct pt_time time;
+ uint64_t offset;
+ } mode_tsx;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Keep track of time as we skip packets. */
+ time = decoder->time;
+ tcal = decoder->tcal;
+
+ /* Keep track of a potential recovery point at MODE.TSX. */
+ memset(&mode_tsx, 0, sizeof(mode_tsx));
+
+ for (;;) {
+ struct pt_packet packet;
+ uint64_t offset;
+
+ errcode = pt_pkt_get_offset(pkt, &offset);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
+ if (errcode < 0) {
+ /* Let's assume the trace is correct if we run out
+ * of packets.
+ */
+ if (errcode == -pte_eos)
+ errcode = 1;
+
+ return errcode;
+ }
+
+ switch (packet.type) {
+ case ppt_tip_pge:
+ /* Everything is fine. There is nothing to do. */
+ return 1;
+
+ case ppt_tip_pgd:
+ /* This is a clear indication that the erratum
+ * applies.
+ *
+ * We synchronize after the disable.
+ */
+ return skd010_recover_disabled(decoder, &tcal, &time,
+ offset + packet.size);
+
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ /* This is a clear indication that the erratum
+ * applies.
+ *
+ * Yet, we can't recover from it as we wouldn't know how
+ * many TNT bits will have been used when we eventually
+ * find an IP packet at which to resume tracing.
+ */
+ return 1;
+
+ case ppt_pip:
+ case ppt_vmcs:
+ /* We could track those changes and synthesize extra
+ * events after the overflow event when recovering from
+ * the erratum. This requires infrastructure that we
+ * don't currently have, though, so we're not going to
+ * do it.
+ *
+ * Instead, we ignore those changes. We already don't
+ * know how many other changes were lost in the
+ * overflow.
+ */
+ break;
+
+ case ppt_mode:
+ switch (packet.payload.mode.leaf) {
+ case pt_mol_exec:
+ /* A MODE.EXEC packet binds to TIP, i.e.
+ *
+ * TIP.PGE: everything is fine
+ * TIP: the erratum applies
+ *
+ * In the TIP.PGE case, we may just follow the
+ * normal code flow.
+ *
+ * In the TIP case, we'd be able to re-sync at
+ * the TIP IP but have to skip packets up to and
+ * including the TIP.
+ *
+ * We'd need to synthesize the MODE.EXEC event
+ * after the overflow event when recovering at
+ * the TIP. We lack the infrastructure for this
+ * - it's getting too complicated.
+ *
+ * Instead, we ignore the execution mode change;
+ * we already don't know how many more such
+ * changes were lost in the overflow.
+ */
+ break;
+
+ case pt_mol_tsx:
+ /* A MODE.TSX packet may be standalone or bind
+ * to FUP.
+ *
+ * If this is the second MODE.TSX, we're sure
+ * that tracing is disabled and everything is
+ * fine.
+ */
+ if (mode_tsx.offset)
+ return 1;
+
+ /* If we find the FUP this packet binds to, we
+ * may recover at the FUP IP and restart
+ * processing packets from here. Remember the
+ * current state.
+ */
+ mode_tsx.offset = offset;
+ mode_tsx.time = time;
+ mode_tsx.tcal = tcal;
+
+ break;
+ }
+
+ break;
+
+ case ppt_fup:
+ /* This is a pretty good indication that tracing
+ * is indeed enabled and the erratum applies.
+ */
+
+ /* If we got a MODE.TSX packet before, we synchronize at
+ * the FUP IP but continue decoding packets starting
+ * from the MODE.TSX.
+ */
+ if (mode_tsx.offset)
+ return skd010_recover(decoder,
+ &packet.payload.ip,
+ &mode_tsx.tcal,
+ &mode_tsx.time,
+ mode_tsx.offset);
+
+ /* Without a preceding MODE.TSX, this FUP is the start
+ * of an async branch or disable. We synchronize at the
+ * FUP IP and continue decoding packets from here.
+ */
+ return skd010_recover(decoder, &packet.payload.ip,
+ &tcal, &time, offset);
+
+ case ppt_tip:
+ /* We synchronize at the TIP IP and continue decoding
+ * packets after the TIP packet.
+ */
+ return skd010_recover(decoder, &packet.payload.ip,
+ &tcal, &time,
+ offset + packet.size);
+
+ case ppt_psb:
+ /* We reached a synchronization point. Tracing is
+ * enabled if and only if the PSB+ contains a FUP.
+ */
+ errcode = pt_qry_find_header_fup(&packet, pkt);
+ if (errcode < 0) {
+ /* If we ran out of packets, we can't tell.
+ * Let's assume the trace is correct.
+ */
+ if (errcode == -pte_eos)
+ errcode = 1;
+
+ return errcode;
+ }
+
+ /* If there is no FUP, tracing is disabled and
+ * everything is fine.
+ */
+ if (!errcode)
+ return 1;
+
+ /* We should have a FUP. */
+ if (packet.type != ppt_fup)
+ return -pte_internal;
+
+ /* Otherwise, we may synchronize at the FUP IP and
+ * continue decoding packets at the PSB.
+ */
+ return skd010_recover(decoder, &packet.payload.ip,
+ &tcal, &time, offset);
+
+ case ppt_psbend:
+ /* We shouldn't see this. */
+ return -pte_bad_context;
+
+ case ppt_ovf:
+ case ppt_stop:
+ /* It doesn't matter if it had been enabled or disabled
+ * before. We may resume normally.
+ */
+ return 1;
+
+ case ppt_unknown:
+ case ppt_invalid:
+ /* We can't skip this packet. */
+ return 1;
+
+ case ppt_pad:
+ case ppt_mnt:
+ case ppt_pwre:
+ case ppt_pwrx:
+ /* Ignore this packet. */
+ break;
+
+ case ppt_exstop:
+ /* We may skip a stand-alone EXSTOP. */
+ if (!packet.payload.exstop.ip)
+ break;
+
+ fallthrough;
+ case ppt_mwait:
+ /* To skip this packet, we'd need to take care of the
+ * FUP it binds to. This is getting complicated.
+ */
+ return 1;
+
+ case ppt_ptw:
+ /* We may skip a stand-alone PTW. */
+ if (!packet.payload.ptw.ip)
+ break;
+
+ /* To skip this packet, we'd need to take care of the
+ * FUP it binds to. This is getting complicated.
+ */
+ return 1;
+
+ case ppt_tsc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tsc(&time, &tcal,
+ &packet.payload.tsc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cbr:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cbr(&time, &tcal,
+ &packet.payload.cbr,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_tma:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tma(&time, &tcal,
+ &packet.payload.tma,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_mtc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_mtc(&time, &tcal,
+ &packet.payload.mtc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cyc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cyc(&time, &tcal,
+ &packet.payload.cyc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+ }
+ }
+}
+
+static int pt_qry_handle_skd010(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t offset;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_qry_get_offset(decoder, &offset);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_sync_set(&pkt, offset);
+ if (errcode >= 0)
+ errcode = skd010_scan_for_ovf_resume(&pkt, decoder);
+
+ pt_pkt_decoder_fini(&pkt);
+ return errcode;
+}
+
+/* Scan ahead for an indication whether tracing is enabled or disabled.
+ *
+ * Returns zero if tracing is clearly disabled.
+ * Returns a positive integer if tracing is enabled or if we can't tell.
+ * Returns a negative error code otherwise.
+ */
+static int apl12_tracing_is_disabled(struct pt_packet_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ for (;;) {
+ struct pt_packet packet;
+ int status;
+
+ status = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (status < 0) {
+ /* Running out of packets is not an error. */
+ if (status == -pte_eos)
+ status = 1;
+
+ return status;
+ }
+
+ switch (packet.type) {
+ default:
+ /* Skip other packets. */
+ break;
+
+ case ppt_stop:
+ /* Tracing is disabled before a stop. */
+ return 0;
+
+ case ppt_tip_pge:
+ /* Tracing gets enabled - it must have been disabled. */
+ return 0;
+
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ case ppt_tip:
+ case ppt_tip_pgd:
+ /* Those packets are only generated when tracing is
+ * enabled. We're done.
+ */
+ return 1;
+
+ case ppt_psb:
+ /* We reached a synchronization point. Tracing is
+ * enabled if and only if the PSB+ contains a FUP.
+ */
+ status = pt_qry_find_header_fup(&packet, decoder);
+
+ /* If we ran out of packets, we can't tell. */
+ if (status == -pte_eos)
+ status = 1;
+
+ return status;
+
+ case ppt_psbend:
+ /* We shouldn't see this. */
+ return -pte_bad_context;
+
+ case ppt_ovf:
+ /* It doesn't matter - we run into the next overflow. */
+ return 1;
+
+ case ppt_unknown:
+ case ppt_invalid:
+ /* We can't skip this packet. */
+ return 1;
+ }
+ }
+}
+
+/* Apply workaround for erratum APL12.
+ *
+ * We resume from @offset (relative to @decoder->pos) with tracing disabled. On
+ * our way to the resume location we process packets to update our state.
+ *
+ * Any event will be dropped.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int apl12_resume_disabled(struct pt_query_decoder *decoder,
+ struct pt_packet_decoder *pkt,
+ unsigned int offset)
+{
+ uint64_t begin, end;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_qry_get_offset(decoder, &begin);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_sync_set(pkt, begin);
+ if (errcode < 0)
+ return errcode;
+
+ end = begin + offset;
+ for (;;) {
+ struct pt_packet packet;
+ uint64_t next;
+
+ errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
+ if (errcode < 0) {
+ /* Running out of packets is not an error. */
+ if (errcode == -pte_eos)
+ errcode = 0;
+
+ return errcode;
+ }
+
+ /* The offset is the start of the next packet. */
+ errcode = pt_pkt_get_offset(pkt, &next);
+ if (errcode < 0)
+ return errcode;
+
+ /* We're done when we reach @offset.
+ *
+ * The current @packet will be the FUP after which we started
+ * our search. We skip it.
+ *
+ * Check that we're not accidentally proceeding past @offset.
+ */
+ if (end <= next) {
+ if (end < next)
+ return -pte_internal;
+
+ break;
+ }
+
+ switch (packet.type) {
+ default:
+ /* Skip other packets. */
+ break;
+
+ case ppt_mode:
+ case ppt_pip:
+ case ppt_vmcs:
+ /* We should not encounter those.
+ *
+ * We should not encounter many packets here, but these
+ * are state-relevant; let's check them explicitly.
+ */
+ return -pte_internal;
+
+ case ppt_tsc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tsc(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.tsc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cbr:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cbr(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.cbr,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_tma:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tma(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.tma,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_mtc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_mtc(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.mtc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cyc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cyc(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.cyc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+ }
+ }
+
+ decoder->pos += offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+}
+
+/* Handle erratum APL12.
+ *
+ * This function is called when a FUP is found after an OVF. The @offset
+ * argument gives the relative offset from @decoder->pos to after the FUP.
+ *
+ * A FUP after OVF normally indicates that the overflow resolved while tracing
+ * is enabled. Due to erratum APL12, however, the overflow may have resolved
+ * while tracing is disabled and still generate a FUP.
+ *
+ * We scan ahead for an indication whether tracing is actually disabled. If we
+ * find one, the erratum applies and we proceed from after the FUP packet.
+ *
+ * This will drop any CBR or MTC events. We will update @decoder's timing state
+ * on CBR but drop the event.
+ *
+ * Returns zero if the erratum was handled.
+ * Returns a positive integer if the erratum was not handled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_qry_handle_apl12(struct pt_query_decoder *decoder,
+ unsigned int offset)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t here;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_get_offset(decoder, &here);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_sync_set(&pkt, here + offset);
+ if (status >= 0) {
+ status = apl12_tracing_is_disabled(&pkt);
+ if (!status)
+ status = apl12_resume_disabled(decoder, &pkt, offset);
+ }
+
+ pt_pkt_decoder_fini(&pkt);
+ return status;
+}
+
+/* Apply workaround for erratum APL11.
+ *
+ * We search for a TIP.PGD and, if we found one, resume from after that packet
+ * with tracing disabled. On our way to the resume location we process packets
+ * to update our state.
+ *
+ * If we don't find a TIP.PGD but instead some other packet that indicates that
+ * tracing is disabled, indicate that the erratum does not apply.
+ *
+ * Any event will be dropped.
+ *
+ * Returns zero if the erratum was handled.
+ * Returns a positive integer if the erratum was not handled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int apl11_apply(struct pt_query_decoder *decoder,
+ struct pt_packet_decoder *pkt)
+{
+ struct pt_time_cal tcal;
+ struct pt_time time;
+
+ if (!decoder)
+ return -pte_internal;
+
+ time = decoder->time;
+ tcal = decoder->tcal;
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet.type) {
+ case ppt_tip_pgd: {
+ uint64_t offset;
+
+ /* We found a TIP.PGD. The erratum applies.
+ *
+ * Resume from here with tracing disabled.
+ */
+ errcode = pt_pkt_get_offset(pkt, &offset);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->time = time;
+ decoder->tcal = tcal;
+ decoder->pos = decoder->config.begin + offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+ }
+
+ case ppt_invalid:
+ return -pte_bad_opc;
+
+ case ppt_fup:
+ case ppt_psb:
+ case ppt_tip_pge:
+ case ppt_stop:
+ case ppt_ovf:
+ case ppt_mode:
+ case ppt_pip:
+ case ppt_vmcs:
+ case ppt_exstop:
+ case ppt_mwait:
+ case ppt_pwre:
+ case ppt_pwrx:
+ case ppt_ptw:
+ /* The erratum does not apply. */
+ return 1;
+
+ case ppt_unknown:
+ case ppt_pad:
+ case ppt_mnt:
+ /* Skip those packets. */
+ break;
+
+ case ppt_psbend:
+ case ppt_tip:
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ return -pte_bad_context;
+
+ case ppt_tsc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tsc(&time, &tcal,
+ &packet.payload.tsc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cbr:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cbr(&time, &tcal,
+ &packet.payload.cbr,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_tma:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tma(&time, &tcal,
+ &packet.payload.tma,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_mtc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_mtc(&time, &tcal,
+ &packet.payload.mtc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cyc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cyc(&time, &tcal,
+ &packet.payload.cyc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+ }
+ }
+}
+
+/* Handle erratum APL11.
+ *
+ * This function is called when we diagnose a bad packet while searching for a
+ * FUP after an OVF.
+ *
+ * Due to erratum APL11 we may get an extra TIP.PGD after the OVF. Find that
+ * TIP.PGD and resume from there with tracing disabled.
+ *
+ * This will drop any CBR or MTC events. We will update @decoder's timing state
+ * on CBR but drop the event.
+ *
+ * Returns zero if the erratum was handled.
+ * Returns a positive integer if the erratum was not handled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_qry_handle_apl11(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t offset;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_get_offset(decoder, &offset);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_sync_set(&pkt, offset);
+ if (status >= 0)
+ status = apl11_apply(decoder, &pkt);
+
+ pt_pkt_decoder_fini(&pkt);
+ return status;
+}
+
+static int pt_pkt_find_ovf_fup(struct pt_packet_decoder *decoder)
+{
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet.type) {
+ case ppt_fup:
+ return 1;
+
+ case ppt_invalid:
+ return -pte_bad_opc;
+
+ case ppt_unknown:
+ case ppt_pad:
+ case ppt_mnt:
+ case ppt_cbr:
+ case ppt_tsc:
+ case ppt_tma:
+ case ppt_mtc:
+ case ppt_cyc:
+ continue;
+
+ case ppt_psb:
+ case ppt_tip_pge:
+ case ppt_mode:
+ case ppt_pip:
+ case ppt_vmcs:
+ case ppt_stop:
+ case ppt_ovf:
+ case ppt_exstop:
+ case ppt_mwait:
+ case ppt_pwre:
+ case ppt_pwrx:
+ case ppt_ptw:
+ return 0;
+
+ case ppt_psbend:
+ case ppt_tip:
+ case ppt_tip_pgd:
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ return -pte_bad_context;
+ }
+ }
+}
+
+/* Find a FUP to which the current OVF may bind.
+ *
+ * Scans the trace for a FUP or for a packet that indicates that tracing is
+ * disabled.
+ *
+ * Returns the relative offset of the packet following the found FUP on success.
+ * Returns zero if no FUP is found and tracing is assumed to be disabled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_qry_find_ovf_fup(const struct pt_query_decoder *decoder)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t begin, end, offset;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_get_offset(decoder, &begin);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_sync_set(&pkt, begin);
+ if (status >= 0) {
+ status = pt_pkt_find_ovf_fup(&pkt);
+ if (status > 0) {
+ status = pt_pkt_get_offset(&pkt, &end);
+ if (status < 0)
+ return status;
+
+ if (end <= begin)
+ return -pte_overflow;
+
+ offset = end - begin;
+ if (INT_MAX < offset)
+ return -pte_overflow;
+
+ status = (int) offset;
+ }
+ }
+
+ pt_pkt_decoder_fini(&pkt);
+ return status;
+}
+
+int pt_qry_decode_ovf(struct pt_query_decoder *decoder)
+{
+ struct pt_time time;
+ int status, offset;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_process_pending_psb_events(decoder);
+ if (status < 0)
+ return status;
+
+ /* If we have any pending psbend events, we're done for now. */
+ if (status)
+ return 0;
+
+ /* Reset the decoder state but preserve timing. */
+ time = decoder->time;
+ pt_qry_reset(decoder);
+ decoder->time = time;
+
+ /* We must consume the OVF before we search for the binding packet. */
+ decoder->pos += ptps_ovf;
+
+ /* Overflow binds to either FUP or TIP.PGE.
+ *
+ * If the overflow can be resolved while PacketEn=1 it binds to FUP. We
+ * can see timing packets between OVF and FUP but that's it.
+ *
+ * Otherwise, PacketEn will be zero when the overflow resolves and OVF
+ * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that
+ * do not depend on PacketEn.
+ *
+ * We don't need to decode everything until TIP.PGE, however. As soon
+ * as we see a non-timing non-FUP packet, we know that tracing has been
+ * disabled before the overflow resolves.
+ */
+ offset = pt_qry_find_ovf_fup(decoder);
+ if (offset <= 0) {
+ /* Check for erratum SKD010.
+ *
+ * The FUP may have been dropped. If we can figure out that
+ * tracing is enabled and hence the FUP is missing, we resume
+ * at a later packet and a different IP.
+ */
+ if (decoder->config.errata.skd010) {
+ status = pt_qry_handle_skd010(decoder);
+ if (status <= 0)
+ return status;
+ }
+
+ /* Check for erratum APL11.
+ *
+ * We may have gotten an extra TIP.PGD, which should be
+ * diagnosed by our search for a subsequent FUP.
+ */
+ if (decoder->config.errata.apl11 &&
+ (offset == -pte_bad_context)) {
+ status = pt_qry_handle_apl11(decoder);
+ if (status <= 0)
+ return status;
+ }
+
+ /* Report the original error from searching for the FUP packet
+ * if we were not able to fix the trace.
+ *
+ * We treat an overflow at the end of the trace as standalone.
+ */
+ if (offset < 0 && offset != -pte_eos)
+ return offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+ } else {
+ /* Check for erratum APL12.
+ *
+ * We may get an extra FUP even though the overflow resolved
+ * with tracing disabled.
+ */
+ if (decoder->config.errata.apl12) {
+ status = pt_qry_handle_apl12(decoder,
+ (unsigned int) offset);
+ if (status <= 0)
+ return status;
+ }
+
+ return pt_qry_event_ovf_enabled(decoder);
+ }
+}
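
From a consumer's point of view, the two outcomes above surface as a ptev_overflow event: if the overflow bound to a FUP, the event carries the IP at which tracing resumed; otherwise the IP is suppressed. A small sketch of how a caller might inspect such an event once pt_qry_event() has reported it (the function name is illustrative):

#include "intel-pt.h"

#include <inttypes.h>
#include <stdio.h>

/* Report how an overflow event obtained from pt_qry_event() resolved. */
static void report_overflow(const struct pt_event *event)
{
	if (!event || event->type != ptev_overflow)
		return;

	if (event->ip_suppressed)
		printf("overflow resolved with tracing disabled\n");
	else
		printf("overflow resolved; tracing resumed at 0x%" PRIx64 "\n",
		       event->variant.overflow.ip);
}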
+
+static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder,
+ const struct pt_packet_mode_exec *packet)
+{
+ struct pt_event *event;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ /* MODE.EXEC binds to TIP. */
+ event = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_exec_mode;
+ event->variant.exec_mode.mode = pt_get_exec_mode(packet);
+
+ return pt_qry_event_time(event, decoder);
+}
+
+static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder,
+ const struct pt_packet_mode_tsx *packet)
+{
+ struct pt_event *event;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ /* MODE.TSX is standalone if tracing is disabled. */
+ if (!decoder->enabled) {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ /* We don't have an IP in this case. */
+ event->variant.tsx.ip = 0;
+ event->ip_suppressed = 1;
+
+ /* Publish the event. */
+ decoder->event = event;
+ } else {
+ /* MODE.TSX binds to FUP. */
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_nomem;
+ }
+
+ event->type = ptev_tsx;
+ event->variant.tsx.speculative = packet->intx;
+ event->variant.tsx.aborted = packet->abrt;
+
+ return pt_qry_event_time(event, decoder);
+}
+
+int pt_qry_decode_mode(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mode packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = 0;
+ switch (packet.leaf) {
+ case pt_mol_exec:
+ errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec);
+ break;
+
+ case pt_mol_tsx:
+ errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx);
+ break;
+ }
+
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_mode(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mode packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* Inside the header, events are reported at the end. */
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ switch (packet.leaf) {
+ case pt_mol_exec:
+ event->type = ptev_exec_mode;
+ event->variant.exec_mode.mode =
+ pt_get_exec_mode(&packet.bits.exec);
+ break;
+
+ case pt_mol_tsx:
+ event->type = ptev_tsx;
+ event->variant.tsx.speculative = packet.bits.tsx.intx;
+ event->variant.tsx.aborted = packet.bits.tsx.abrt;
+ break;
+ }
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_psbend(struct pt_query_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_process_pending_psb_events(decoder);
+ if (status < 0)
+ return status;
+
+ /* If we had any psb events, we're done for now. */
+ if (status)
+ return 0;
+
+ /* Skip the psbend extended opcode that we fetched before if no more
+ * psbend events are pending.
+ */
+ decoder->pos += ptps_psbend;
+ return 0;
+}
+
+int pt_qry_decode_tsc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tsc packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_tsc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tsc packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_cbr(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_cbr packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_cbr;
+ event->variant.cbr.ratio = packet.ratio;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_cbr(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_cbr packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_cbr;
+ event->variant.cbr.ratio = packet.ratio;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_tma(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tma packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_mtc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mtc packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int check_erratum_skd007(struct pt_query_decoder *decoder,
+ const struct pt_packet_cyc *packet, int size)
+{
+ const uint8_t *pos;
+ uint16_t payload;
+
+ if (!decoder || !packet || size < 0)
+ return -pte_internal;
+
+ /* It must be a 2-byte CYC. */
+ if (size != 2)
+ return 0;
+
+ payload = (uint16_t) packet->value;
+
+ /* The 2nd byte of the CYC payload must look like an ext opcode. */
+ if ((payload & ~0x1f) != 0x20)
+ return 0;
+
+ /* Skip this CYC packet. */
+ pos = decoder->pos + size;
+ if (decoder->config.end <= pos)
+ return 0;
+
+ /* See if we got a second CYC that looks like an OVF ext opcode. */
+ if (*pos != pt_ext_ovf)
+ return 0;
+
+ /* We shouldn't get back-to-back CYCs unless they are sent when the
+ * counter wraps around. In this case, we'd expect a full payload.
+ *
+ * Since we got two non-full CYC packets, we assume the erratum hit.
+ */
+
+ return 1;
+}
+
+int pt_qry_decode_cyc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_cyc packet;
+ struct pt_config *config;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ config = &decoder->config;
+
+ size = pt_pkt_read_cyc(&packet, decoder->pos, config);
+ if (size < 0)
+ return size;
+
+ if (config->errata.skd007) {
+ errcode = check_erratum_skd007(decoder, &packet, size);
+ if (errcode < 0)
+ return errcode;
+
+ /* If the erratum hits, we ignore the partial CYC and instead
+ * process the OVF following/overlapping it.
+ */
+ if (errcode) {
+ /* We skip the first byte of the CYC, which brings us
+ * to the beginning of the OVF packet.
+ */
+ decoder->pos += 1;
+ return 0;
+ }
+ }
+
+ errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal,
+ &packet, config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_stop(struct pt_query_decoder *decoder)
+{
+ struct pt_event *event;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Stop events are reported immediately. */
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_stop;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += ptps_stop;
+ return 0;
+}
+
+int pt_qry_header_vmcs(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_vmcs packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_vmcs;
+ event->variant.async_vmcs.base = packet.base;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_vmcs(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_vmcs packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* VMCS events bind to the same IP as an in-flight async paging event.
+ *
+ * In that case, the VMCS event should be applied first. We reorder
+ * events here to simplify the life of higher layers.
+ */
+ event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging);
+ if (event) {
+ struct pt_event *paging;
+
+ paging = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!paging)
+ return -pte_nomem;
+
+ *paging = *event;
+
+ event->type = ptev_async_vmcs;
+ event->variant.async_vmcs.base = packet.base;
+
+ decoder->pos += size;
+ return 0;
+ }
+
+ /* VMCS events bind to the same TIP packet as an in-flight async
+ * branch event.
+ */
+ event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
+ if (event) {
+ event = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_vmcs;
+ event->variant.async_vmcs.base = packet.base;
+
+ decoder->pos += size;
+ return 0;
+ }
+
+ /* VMCS events that do not bind to an in-flight async event are
+ * stand-alone.
+ */
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_vmcs;
+ event->variant.vmcs.base = packet.base;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_mnt(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mnt packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_mnt;
+ event->variant.mnt.payload = packet.payload;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+
+ return 0;
+}
+
+int pt_qry_header_mnt(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mnt packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_mnt;
+ event->variant.mnt.payload = packet.payload;
+
+ decoder->pos += size;
+
+ return 0;
+}
+
+int pt_qry_decode_exstop(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_exstop packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_exstop(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ if (packet.ip) {
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_exstop;
+ } else {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_exstop;
+
+ event->ip_suppressed = 1;
+ event->variant.exstop.ip = 0ull;
+
+ decoder->event = event;
+ }
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_mwait(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mwait packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mwait(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_mwait;
+ event->variant.mwait.hints = packet.hints;
+ event->variant.mwait.ext = packet.ext;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_pwre(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pwre packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwre(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_pwre;
+ event->variant.pwre.state = packet.state;
+ event->variant.pwre.sub_state = packet.sub_state;
+
+ if (packet.hw)
+ event->variant.pwre.hw = 1;
+
+ decoder->event = event;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_pwrx(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pwrx packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwrx(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_pwrx;
+ event->variant.pwrx.last = packet.last;
+ event->variant.pwrx.deepest = packet.deepest;
+
+ if (packet.interrupt)
+ event->variant.pwrx.interrupt = 1;
+ if (packet.store)
+ event->variant.pwrx.store = 1;
+ if (packet.autonomous)
+ event->variant.pwrx.autonomous = 1;
+
+ decoder->event = event;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_ptw(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_ptw packet;
+ struct pt_event *event;
+ int size, pls;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_ptw(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ pls = pt_ptw_size(packet.plc);
+ if (pls < 0)
+ return pls;
+
+ if (packet.ip) {
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_internal;
+ } else {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->ip_suppressed = 1;
+
+ decoder->event = event;
+ }
+
+ event->type = ptev_ptwrite;
+ event->variant.ptwrite.size = (uint8_t) pls;
+ event->variant.ptwrite.payload = packet.payload;
+
+ decoder->pos += size;
+ return 0;
+}
diff --git a/libipt/src/pt_retstack.c b/libipt/src/pt_retstack.c
new file mode 100644
index 000000000000..cc568367ed1a
--- /dev/null
+++ b/libipt/src/pt_retstack.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_retstack.h"
+
+#include "intel-pt.h"
+
+
+void pt_retstack_init(struct pt_retstack *retstack)
+{
+ if (!retstack)
+ return;
+
+ retstack->top = 0;
+ retstack->bottom = 0;
+}
+
+int pt_retstack_is_empty(const struct pt_retstack *retstack)
+{
+ if (!retstack)
+ return -pte_invalid;
+
+ return (retstack->top == retstack->bottom ? 1 : 0);
+}
+
+int pt_retstack_pop(struct pt_retstack *retstack, uint64_t *ip)
+{
+ uint8_t top;
+
+ if (!retstack)
+ return -pte_invalid;
+
+ top = retstack->top;
+
+ if (top == retstack->bottom)
+ return -pte_retstack_empty;
+
+ top = (!top ? pt_retstack_size : top - 1);
+
+ retstack->top = top;
+
+ if (ip)
+ *ip = retstack->stack[top];
+
+ return 0;
+}
+
+int pt_retstack_push(struct pt_retstack *retstack, uint64_t ip)
+{
+ uint8_t top, bottom;
+
+ if (!retstack)
+ return -pte_invalid;
+
+ top = retstack->top;
+ bottom = retstack->bottom;
+
+ retstack->stack[top] = ip;
+
+ top = (top == pt_retstack_size ? 0 : top + 1);
+
+ if (bottom == top)
+ bottom = (bottom == pt_retstack_size ? 0 : bottom + 1);
+
+ retstack->top = top;
+ retstack->bottom = bottom;
+
+ return 0;
+}
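
The return stack above is a fixed-size ring buffer: top == bottom means empty, pop walks top backwards, and pushing onto a full stack silently advances bottom, dropping the oldest return address. A self-contained toy version with the same conventions (the capacity of 4 and all names are illustrative):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RS_SIZE 4

struct toy_retstack {
	uint64_t stack[RS_SIZE + 1];
	uint8_t top, bottom;
};

static void toy_push(struct toy_retstack *rs, uint64_t ip)
{
	rs->stack[rs->top] = ip;
	rs->top = (rs->top == RS_SIZE ? 0 : rs->top + 1);

	/* The stack was full; drop the oldest entry. */
	if (rs->bottom == rs->top)
		rs->bottom = (rs->bottom == RS_SIZE ? 0 : rs->bottom + 1);
}

static int toy_pop(struct toy_retstack *rs, uint64_t *ip)
{
	if (rs->top == rs->bottom)
		return -1;	/* empty */

	rs->top = (!rs->top ? RS_SIZE : rs->top - 1);
	*ip = rs->stack[rs->top];

	return 0;
}

int main(void)
{
	struct toy_retstack rs = { .top = 0, .bottom = 0 };
	uint64_t ip;

	for (ip = 1; ip <= 6; ++ip)
		toy_push(&rs, ip);

	/* Prints 6, 5, 4, 3; the two oldest entries (1, 2) were dropped. */
	while (!toy_pop(&rs, &ip))
		printf("%" PRIu64 "\n", ip);

	return 0;
}
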
diff --git a/libipt/src/pt_section.c b/libipt/src/pt_section.c
new file mode 100644
index 000000000000..77bae915fb7b
--- /dev/null
+++ b/libipt/src/pt_section.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_block_cache.h"
+#include "pt_image_section_cache.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ return strcpy(dup, str);
+}
+
+struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
+ uint64_t size)
+{
+ struct pt_section *section;
+ uint64_t fsize;
+ void *status;
+ int errcode;
+
+ errcode = pt_section_mk_status(&status, &fsize, filename);
+ if (errcode < 0)
+ return NULL;
+
+ /* Fail if the requested @offset lies beyond the end of @file. */
+ if (fsize <= offset)
+ goto out_status;
+
+ /* Truncate @size so the entire range lies within @file. */
+ fsize -= offset;
+ if (fsize < size)
+ size = fsize;
+
+ section = malloc(sizeof(*section));
+ if (!section)
+ goto out_status;
+
+ memset(section, 0, sizeof(*section));
+
+ section->filename = dupstr(filename);
+ section->status = status;
+ section->offset = offset;
+ section->size = size;
+ section->ucount = 1;
+
+#if defined(FEATURE_THREADS)
+
+ errcode = mtx_init(&section->lock, mtx_plain);
+ if (errcode != thrd_success) {
+ free(section->filename);
+ free(section);
+ goto out_status;
+ }
+
+ errcode = mtx_init(&section->alock, mtx_plain);
+ if (errcode != thrd_success) {
+ mtx_destroy(&section->lock);
+ free(section->filename);
+ free(section);
+ goto out_status;
+ }
+
+#endif /* defined(FEATURE_THREADS) */
+
+ return section;
+
+out_status:
+ free(status);
+ return NULL;
+}
+
+int pt_section_lock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_section_unlock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static void pt_section_free(struct pt_section *section)
+{
+ if (!section)
+ return;
+
+#if defined(FEATURE_THREADS)
+
+ mtx_destroy(&section->alock);
+ mtx_destroy(&section->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+
+ free(section->filename);
+ free(section->status);
+ free(section);
+}
+
+int pt_section_get(struct pt_section *section)
+{
+ uint16_t ucount;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = section->ucount + 1;
+ if (!ucount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->ucount = ucount;
+
+ return pt_section_unlock(section);
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ uint16_t ucount, mcount;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+ ucount = section->ucount;
+ if (ucount > 1) {
+ section->ucount = ucount - 1;
+ return pt_section_unlock(section);
+ }
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (!ucount || mcount)
+ return -pte_internal;
+
+ pt_section_free(section);
+ return 0;
+}
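
Sections are reference counted: every user takes a reference and the last pt_section_put() frees the object, provided it is no longer mapped (mcount must be zero). A brief usage sketch of that convention; the file name, offset, and size are placeholders:

#include "pt_section.h"

#include "intel-pt.h"

static int use_section(const char *filename)
{
	struct pt_section *section;
	int errcode;

	section = pt_mk_section(filename, 0x1000ull, 0x100ull);
	if (!section)
		return -pte_nomem;	/* or a file/offset problem */

	/* Take a second reference, e.g. on behalf of another user. */
	errcode = pt_section_get(section);
	if (errcode < 0) {
		(void) pt_section_put(section);
		return errcode;
	}

	/* ... both users are done with the section ... */

	(void) pt_section_put(section);	/* drops the extra reference */
	return pt_section_put(section);	/* the last put frees the section */
}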
+
+static int pt_section_lock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_unlock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ uint16_t acount, ucount;
+ int errcode;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = section->ucount;
+ acount = section->acount;
+ if (!acount) {
+ if (section->iscache || !ucount)
+ goto out_unlock;
+
+ section->iscache = iscache;
+ section->acount = 1;
+
+ return pt_section_unlock_attach(section);
+ }
+
+ acount += 1;
+ if (!acount) {
+ (void) pt_section_unlock_attach(section);
+ return -pte_overflow;
+ }
+
+ if (ucount < acount)
+ goto out_unlock;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ section->acount = acount;
+
+ return pt_section_unlock_attach(section);
+
+ out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ uint16_t acount, ucount;
+ int errcode;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ acount = section->acount;
+ if (!acount)
+ goto out_unlock;
+
+ acount -= 1;
+ ucount = section->ucount;
+ if (ucount < acount)
+ goto out_unlock;
+
+ section->acount = acount;
+ if (!acount)
+ section->iscache = NULL;
+
+ return pt_section_unlock_attach(section);
+
+ out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+const char *pt_section_filename(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->filename;
+}
+
+uint64_t pt_section_size(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->size;
+}
+
+static int pt_section_bcache_memsize(const struct pt_section *section,
+ uint64_t *psize)
+{
+ struct pt_block_cache *bcache;
+
+ if (!section || !psize)
+ return -pte_internal;
+
+ bcache = section->bcache;
+ if (!bcache) {
+ *psize = 0ull;
+ return 0;
+ }
+
+ *psize = sizeof(*bcache) +
+ (bcache->nentries * sizeof(struct pt_bcache_entry));
+
+ return 0;
+}
+
+static int pt_section_memsize_locked(const struct pt_section *section,
+ uint64_t *psize)
+{
+ uint64_t msize, bcsize;
+ int (*memsize)(const struct pt_section *section, uint64_t *size);
+ int errcode;
+
+ if (!section || !psize)
+ return -pte_internal;
+
+ memsize = section->memsize;
+ if (!memsize) {
+ if (section->mcount)
+ return -pte_internal;
+
+ *psize = 0ull;
+ return 0;
+ }
+
+ errcode = memsize(section, &msize);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_bcache_memsize(section, &bcsize);
+ if (errcode < 0)
+ return errcode;
+
+ *psize = msize + bcsize;
+
+ return 0;
+}
+
+int pt_section_memsize(struct pt_section *section, uint64_t *size)
+{
+ int errcode, status;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_memsize_locked(section, size);
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+uint64_t pt_section_offset(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->offset;
+}
+
+int pt_section_alloc_bcache(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ struct pt_block_cache *bcache;
+ uint64_t ssize, memsize;
+ uint32_t csize;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ if (!section->mcount)
+ return -pte_internal;
+
+ ssize = pt_section_size(section);
+ csize = (uint32_t) ssize;
+
+ if (csize != ssize)
+ return -pte_not_supported;
+
+ memsize = 0ull;
+
+ /* We need to take both the attach and the section lock in order to pair
+ * the block cache allocation and the resize notification.
+ *
+ * This allows map notifications in between but they only change the
+ * order of sections in the cache.
+ *
+ * The attach lock needs to be taken first.
+ */
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ bcache = pt_section_bcache(section);
+ if (bcache) {
+ errcode = 0;
+ goto out_lock;
+ }
+
+ bcache = pt_bcache_alloc(csize);
+ if (!bcache) {
+ errcode = -pte_nomem;
+ goto out_lock;
+ }
+
+ /* Install the block cache. It will become visible and may be used
+ * immediately.
+ *
+ * If we fail later on, we leave the block cache and report the error to
+ * the allocating decoder thread.
+ */
+ section->bcache = bcache;
+
+ errcode = pt_section_memsize_locked(section, &memsize);
+ if (errcode < 0)
+ goto out_lock;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ if (memsize) {
+ iscache = section->iscache;
+ if (iscache) {
+ errcode = pt_iscache_notify_resize(iscache, section,
+ memsize);
+ if (errcode < 0)
+ goto out_alock;
+ }
+ }
+
+ return pt_section_unlock_attach(section);
+
+out_lock:
+ (void) pt_section_unlock(section);
+
+out_alock:
+ (void) pt_section_unlock_attach(section);
+ return errcode;
+}
+
+int pt_section_on_map_lock(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ iscache = section->iscache;
+ if (!iscache)
+ return pt_section_unlock_attach(section);
+
+ /* There is a potential deadlock when @section was unmapped again and
+ * @iscache tries to map it. This would cause this function to be
+ * re-entered while we're still holding the attach lock.
+ *
+ * This scenario is very unlikely, though, since our caller does not yet
+ * know whether pt_section_map() succeeded.
+ */
+ status = pt_iscache_notify_map(iscache, section);
+
+ errcode = pt_section_unlock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+int pt_section_map_share(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_internal;
+ }
+
+ mcount += 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ return pt_section_unlock(section);
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+
+ errcode = -pte_nomap;
+ if (!mcount)
+ goto out_unlock;
+
+ section->mcount = mcount -= 1;
+ if (mcount)
+ return pt_section_unlock(section);
+
+ errcode = -pte_internal;
+ if (!section->unmap)
+ goto out_unlock;
+
+ status = section->unmap(section);
+
+ pt_bcache_free(section->bcache);
+ section->bcache = NULL;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
+
+int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ uint64_t limit, space;
+
+ if (!section)
+ return -pte_internal;
+
+ if (!section->read)
+ return -pte_nomap;
+
+ limit = section->size;
+ if (limit <= offset)
+ return -pte_nomap;
+
+ /* Truncate if we try to read past the end of the section. */
+ space = limit - offset;
+ if (space < size)
+ size = (uint16_t) space;
+
+ return section->read(section, buffer, size, offset);
+}
diff --git a/libipt/src/pt_section_file.c b/libipt/src/pt_section_file.c
new file mode 100644
index 000000000000..299a94eb7d04
--- /dev/null
+++ b/libipt/src/pt_section_file.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+static int fmap_init(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return -pte_internal;
+
+ memset(mapping, 0, sizeof(*mapping));
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_init(&mapping->lock, mtx_plain);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static void fmap_fini(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ fclose(mapping->file);
+
+#if defined(FEATURE_THREADS)
+
+ mtx_destroy(&mapping->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+}
+
+static int fmap_lock(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&mapping->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int fmap_unlock(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&mapping->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_sec_file_map(struct pt_section *section, FILE *file)
+{
+ struct pt_sec_file_mapping *mapping;
+ uint64_t offset, size;
+ long begin, end, fsize;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (mapping)
+ return -pte_internal;
+
+ offset = section->offset;
+ size = section->size;
+
+ begin = (long) offset;
+ end = begin + (long) size;
+
+ /* Check for overflows. */
+ if ((uint64_t) begin != offset)
+ return -pte_bad_image;
+
+ if ((uint64_t) end != (offset + size))
+ return -pte_bad_image;
+
+ if (end < begin)
+ return -pte_bad_image;
+
+ /* Validate that the section lies within the file. */
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode)
+ return -pte_bad_image;
+
+ fsize = ftell(file);
+ if (fsize < 0)
+ return -pte_bad_image;
+
+ if (fsize < end)
+ return -pte_bad_image;
+
+ mapping = malloc(sizeof(*mapping));
+ if (!mapping)
+ return -pte_nomem;
+
+ errcode = fmap_init(mapping);
+ if (errcode < 0)
+ goto out_mem;
+
+ mapping->file = file;
+ mapping->begin = begin;
+ mapping->end = end;
+
+ section->mapping = mapping;
+ section->unmap = pt_sec_file_unmap;
+ section->read = pt_sec_file_read;
+ section->memsize = pt_sec_file_memsize;
+
+ return 0;
+
+out_mem:
+ free(mapping);
+ return errcode;
+}
+
+int pt_sec_file_unmap(struct pt_section *section)
+{
+ struct pt_sec_file_mapping *mapping;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+
+ if (!mapping || !section->unmap || !section->read || !section->memsize)
+ return -pte_internal;
+
+ section->mapping = NULL;
+ section->unmap = NULL;
+ section->read = NULL;
+ section->memsize = NULL;
+
+ fmap_fini(mapping);
+ free(mapping);
+
+ return 0;
+}
+
+int pt_sec_file_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct pt_sec_file_mapping *mapping;
+ FILE *file;
+ long begin;
+ size_t read;
+ int errcode;
+
+ if (!buffer || !section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ file = mapping->file;
+
+ /* We already checked in pt_section_read() that the requested memory
+ * lies within the section's boundaries.
+ *
+ * And we checked that the file covers the entire section in
+ * pt_sec_file_map(). There is no need to check for overflows again.
+ */
+ begin = mapping->begin + (long) offset;
+
+ errcode = fmap_lock(mapping);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = fseek(file, begin, SEEK_SET);
+ if (errcode)
+ goto out_unlock;
+
+ read = fread(buffer, 1, size, file);
+
+ errcode = fmap_unlock(mapping);
+ if (errcode < 0)
+ return errcode;
+
+ return (int) read;
+
+out_unlock:
+ (void) fmap_unlock(mapping);
+ return -pte_nomap;
+}
+
+int pt_sec_file_memsize(const struct pt_section *section, uint64_t *size)
+{
+ if (!section || !size)
+ return -pte_internal;
+
+ if (!section->mapping)
+ return -pte_internal;
+
+ *size = 0ull;
+
+ return 0;
+}
diff --git a/libipt/src/pt_sync.c b/libipt/src/pt_sync.c
new file mode 100644
index 000000000000..cf604203b17b
--- /dev/null
+++ b/libipt/src/pt_sync.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_sync.h"
+#include "pt_packet.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+/* A psb packet contains a unique 2-byte repeating pattern.
+ *
+ * There are only two ways to fill up a 64bit word with such a pattern.
+ */
+static const uint64_t psb_pattern[] = {
+ ((uint64_t) pt_psb_lohi | (uint64_t) pt_psb_lohi << 16 |
+ (uint64_t) pt_psb_lohi << 32 | (uint64_t) pt_psb_lohi << 48),
+ ((uint64_t) pt_psb_hilo | (uint64_t) pt_psb_hilo << 16 |
+ (uint64_t) pt_psb_hilo << 32 | (uint64_t) pt_psb_hilo << 48)
+};
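
For illustration, assuming the usual PSB byte pattern of 0x02 0x82 repeated (pt_opc_psb and pt_ext_psb in pt_opcodes.h), the two values above are 0x8202820282028202 and 0x0282028202820282 on a little-endian machine; which one a 64-bit load sees depends only on whether the aligned window starts on the 0x02 or on the 0x82 byte. A stand-alone check of that claim:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t psb[] = { 0x02, 0x82, 0x02, 0x82, 0x02, 0x82,
				0x02, 0x82, 0x02 };
	uint64_t lohi, hilo;

	memcpy(&lohi, psb, sizeof(lohi));	/* window starts on 0x02 */
	memcpy(&hilo, psb + 1, sizeof(hilo));	/* window starts on 0x82 */

	/* On a little-endian machine this prints
	 * 8202820282028202 and 0282028202820282.
	 */
	printf("%016" PRIx64 "\n%016" PRIx64 "\n", lohi, hilo);

	return 0;
}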
+
+static const uint8_t *truncate(const uint8_t *pointer, size_t alignment)
+{
+ uintptr_t raw = (uintptr_t) pointer;
+
+ raw /= alignment;
+ raw *= alignment;
+
+ return (const uint8_t *) raw;
+}
+
+static const uint8_t *align(const uint8_t *pointer, size_t alignment)
+{
+ return truncate(pointer + alignment - 1, alignment);
+}
+
+/* Find a psb packet given a position somewhere in the payload.
+ *
+ * Returns the position of the psb packet.
+ * Returns NULL if this is not a psb packet.
+ */
+static const uint8_t *pt_find_psb(const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ int errcode;
+
+ if (!pos || !config)
+ return NULL;
+
+ begin = config->begin;
+ end = config->end;
+
+ /* Navigate to the end of the psb payload pattern.
+ *
+ * Beware that PSB is an extended opcode. We must not mistake the extended
+ * opcode of the following packet for part of the PSB.
+ */
+ if (*pos != pt_psb_hi)
+ pos++;
+
+ for (; (pos + 1) < end; pos += 2) {
+ uint8_t hi, lo;
+
+ hi = pos[0];
+ lo = pos[1];
+
+ if (hi != pt_psb_hi)
+ break;
+
+ if (lo != pt_psb_lo)
+ break;
+ }
+
+ /* We're right after the psb payload and within the buffer.
+ * Navigate to the expected beginning of the psb packet.
+ */
+ pos -= ptps_psb;
+
+ /* Check if we're still inside the buffer. */
+ if (pos < begin)
+ return NULL;
+
+ /* Check that this is indeed a psb packet we're at. */
+ if (pos[0] != pt_opc_psb || pos[1] != pt_ext_psb)
+ return NULL;
+
+ errcode = pt_pkt_read_psb(pos, config);
+ if (errcode < 0)
+ return NULL;
+
+ return pos;
+}
+
+static int pt_sync_within_bounds(const uint8_t *pos, const uint8_t *begin,
+ const uint8_t *end)
+{
+ /* We allow @pos == @end representing the very end of the trace.
+ *
+ * This will result in -pte_eos when we actually try to read from @pos.
+ */
+ return (begin <= pos) && (pos <= end);
+}
+
+int pt_sync_set(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ int errcode;
+
+ if (!sync || !pos || !config)
+ return -pte_internal;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pt_sync_within_bounds(pos, begin, end))
+ return -pte_eos;
+
+ if (end < pos + 2)
+ return -pte_eos;
+
+ /* Check that this is indeed a psb packet we're at. */
+ if (pos[0] != pt_opc_psb || pos[1] != pt_ext_psb)
+ return -pte_nosync;
+
+ errcode = pt_pkt_read_psb(pos, config);
+ if (errcode < 0)
+ return errcode;
+
+ *sync = pos;
+
+ return 0;
+}
+
+int pt_sync_forward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+
+ if (!sync || !pos || !config)
+ return -pte_internal;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pt_sync_within_bounds(pos, begin, end))
+ return -pte_internal;
+
+ /* We search for a full 64bit word. It's OK to skip the current one. */
+ pos = align(pos, sizeof(*psb_pattern));
+
+ /* Search for the psb payload pattern in the buffer. */
+ for (;;) {
+ const uint8_t *current = pos;
+ uint64_t val;
+
+ pos += sizeof(uint64_t);
+ if (end < pos)
+ return -pte_eos;
+
+ val = * (const uint64_t *) current;
+
+ if ((val != psb_pattern[0]) && (val != psb_pattern[1]))
+ continue;
+
+ /* We found a 64bit word's worth of psb payload pattern. */
+ current = pt_find_psb(pos, config);
+ if (!current)
+ continue;
+
+ *sync = current;
+ return 0;
+ }
+}
+
+int pt_sync_backward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+
+ if (!sync || !pos || !config)
+ return -pte_internal;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pt_sync_within_bounds(pos, begin, end))
+ return -pte_internal;
+
+ /* We search for a full 64bit word. It's OK to skip the current one. */
+ pos = truncate(pos, sizeof(*psb_pattern));
+
+ /* Search for the psb payload pattern in the buffer. */
+ for (;;) {
+ const uint8_t *next = pos;
+ uint64_t val;
+
+ pos -= sizeof(uint64_t);
+ if (pos < begin)
+ return -pte_eos;
+
+ val = * (const uint64_t *) pos;
+
+ if ((val != psb_pattern[0]) && (val != psb_pattern[1]))
+ continue;
+
+ /* We found a 64bit word's worth of psb payload pattern. */
+ next = pt_find_psb(next, config);
+ if (!next)
+ continue;
+
+ *sync = next;
+ return 0;
+ }
+}
diff --git a/libipt/src/pt_time.c b/libipt/src/pt_time.c
new file mode 100644
index 000000000000..8c55ccde0fd6
--- /dev/null
+++ b/libipt/src/pt_time.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_time.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <limits.h>
+
+
+void pt_time_init(struct pt_time *time)
+{
+ if (!time)
+ return;
+
+ memset(time, 0, sizeof(*time));
+}
+
+int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc,
+ uint32_t *lost_cyc, const struct pt_time *time)
+{
+ if (!tsc || !time)
+ return -pte_internal;
+
+ *tsc = time->tsc;
+
+ if (lost_mtc)
+ *lost_mtc = time->lost_mtc;
+ if (lost_cyc)
+ *lost_cyc = time->lost_cyc;
+
+ if (!time->have_tsc)
+ return -pte_no_time;
+
+ return 0;
+}
+
+int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time)
+{
+ if (!cbr || !time)
+ return -pte_internal;
+
+ if (!time->have_cbr)
+ return -pte_no_cbr;
+
+ *cbr = time->cbr;
+
+ return 0;
+}
+
+/* Compute the distance between two CTC sources.
+ *
+ * We adjust a single wrap-around but fail if the distance is bigger than that.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_time_ctc_delta(uint32_t *ctc_delta, uint32_t ctc,
+ uint32_t last_ctc, const struct pt_config *config)
+{
+ if (!config || !ctc_delta)
+ return -pte_internal;
+
+ /* Correct a single wrap-around. If we lost enough MTCs to wrap
+ * around twice, timing will be wrong until the next TSC.
+ */
+ if (ctc < last_ctc) {
+ ctc += 1u << (config->mtc_freq + pt_pl_mtc_bit_size);
+
+ * Since we only store the CTC between TMA/MTC or MTC/MTC, a
+ * single correction should suffice.
+ */
+ if (ctc < last_ctc)
+ return -pte_bad_packet;
+ }
+
+ *ctc_delta = ctc - last_ctc;
+ return 0;
+}
+
+/* Translate CTC into the same unit as the FastCounter by multiplying with the
+ * TSC/CTC ratio given by CPUID leaf 0x15 (EBX/EAX).
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_time_ctc_fc(uint64_t *fc, uint64_t ctc,
+ const struct pt_config *config)
+{
+ uint32_t eax, ebx;
+
+ if (!fc || !config)
+ return -pte_internal;
+
+ eax = config->cpuid_0x15_eax;
+ ebx = config->cpuid_0x15_ebx;
+
+ /* Neither multiply nor divide by zero. */
+ if (!eax || !ebx)
+ return -pte_bad_config;
+
+ *fc = (ctc * ebx) / eax;
+ return 0;
+}
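
A short numeric walk-through of the two helpers above may help. The numbers are made up: pt_pl_mtc_bit_size is assumed to be 8 (a one-byte MTC payload) and the CPUID 0x15 values describe a part whose timestamp counter runs at 25 times the crystal clock (EAX = 2, EBX = 50):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t mtc_freq = 3, mtc_bits = 8;	/* assumptions */
	const uint32_t eax = 2, ebx = 50;		/* illustrative */
	uint32_t last_ctc = 2040, ctc = 8, ctc_delta;
	uint64_t fc;

	/* pt_time_ctc_delta(): the stored CTC wraps modulo
	 * 2^(mtc_freq + mtc_bits) = 2048, so correct one wrap-around.
	 */
	if (ctc < last_ctc)
		ctc += 1u << (mtc_freq + mtc_bits);

	ctc_delta = ctc - last_ctc;			/* 2056 - 2040 = 16 */

	/* pt_time_ctc_fc(): scale crystal ticks to TSC ticks. */
	fc = ((uint64_t) ctc_delta * ebx) / eax;	/* 16 * 50 / 2 = 400 */

	printf("ctc_delta=%" PRIu32 " fc=%" PRIu64 "\n", ctc_delta, fc);

	return 0;
}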
+
+int pt_time_update_tsc(struct pt_time *time,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!time || !packet)
+ return -pte_internal;
+
+ time->have_tsc = 1;
+ time->have_tma = 0;
+ time->have_mtc = 0;
+ time->tsc = time->base = packet->tsc;
+ time->ctc = 0;
+ time->fc = 0ull;
+
+ /* We got the full time; we recover from previous losses. */
+ time->lost_mtc = 0;
+ time->lost_cyc = 0;
+
+ return 0;
+}
+
+int pt_time_update_cbr(struct pt_time *time,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!time || !packet)
+ return -pte_internal;
+
+ time->have_cbr = 1;
+ time->cbr = packet->ratio;
+
+ return 0;
+}
+
+int pt_time_update_tma(struct pt_time *time,
+ const struct pt_packet_tma *packet,
+ const struct pt_config *config)
+{
+ uint32_t ctc, mtc_freq, mtc_hi, ctc_mask;
+ uint64_t fc;
+
+ if (!time || !packet || !config)
+ return -pte_internal;
+
+ /* Without a TSC something is seriously wrong. */
+ if (!time->have_tsc)
+ return -pte_bad_context;
+
+ /* We shouldn't have more than one TMA per TSC. */
+ if (time->have_tma)
+ return -pte_bad_context;
+
+ /* We're ignoring MTC between TSC and TMA. */
+ if (time->have_mtc)
+ return -pte_internal;
+
+ ctc = packet->ctc;
+ fc = packet->fc;
+
+ mtc_freq = config->mtc_freq;
+ mtc_hi = mtc_freq + pt_pl_mtc_bit_size;
+
+ /* A mask for the relevant CTC bits ignoring high-order bits that are
+ * not provided by MTC.
+ */
+ ctc_mask = (1u << mtc_hi) - 1u;
+
+ time->have_tma = 1;
+ time->base -= fc;
+ time->fc += fc;
+
+ /* If the MTC frequency is low enough that TMA provides the full CTC
+ * value, we can use the TMA as an MTC.
+ *
+ * If it isn't, we will estimate the preceding MTC based on the CTC bits
+ * the TMA provides at the next MTC. We forget about the previous MTC
+ * in this case.
+ *
+ * If no MTC packets are dropped around TMA, we will estimate the
+ * forgotten value again at the next MTC.
+ *
+ * If MTC packets are dropped, we can't really tell where in this
+ * extended MTC period the TSC occurred. The estimation will place it
+ * right before the next MTC.
+ */
+ if (mtc_hi <= pt_pl_tma_ctc_bit_size)
+ time->have_mtc = 1;
+
+ /* In both cases, we store the TMA's CTC bits until the next MTC. */
+ time->ctc = time->ctc_cyc = ctc & ctc_mask;
+
+ return 0;
+}
+
+int pt_time_update_mtc(struct pt_time *time,
+ const struct pt_packet_mtc *packet,
+ const struct pt_config *config)
+{
+ uint32_t last_ctc, ctc, ctc_delta;
+ uint64_t tsc, base;
+ uint8_t mtc_freq;
+ int errcode, have_tsc, have_tma, have_mtc;
+
+ if (!time || !packet || !config)
+ return -pte_internal;
+
+ have_tsc = time->have_tsc;
+ have_tma = time->have_tma;
+ have_mtc = time->have_mtc;
+
+ /* We ignore MTCs between TSC and TMA to avoid apparent CTC overflows.
+ *
+ * Later MTCs will ensure that no time is lost - provided TMA provides
+ * enough bits. If TMA doesn't provide any of the MTC bits we may place
+ * the TSC into the wrong MTC period.
+ */
+ if (have_tsc && !have_tma)
+ return 0;
+
+ base = time->base;
+ last_ctc = time->ctc;
+ mtc_freq = config->mtc_freq;
+
+ ctc = packet->ctc << mtc_freq;
+
+ /* Store our CTC value if we have or would have reset FC. */
+ if (time->fc || time->lost_cyc || !have_mtc)
+ time->ctc_cyc = ctc;
+
+ /* Prepare for the next packet in case we error out below. */
+ time->have_mtc = 1;
+ time->fc = 0ull;
+ time->ctc = ctc;
+
+ /* We recover from previous CYC losses. */
+ time->lost_cyc = 0;
+
+ /* Avoid a big jump when we see the first MTC with an arbitrary CTC
+ * payload.
+ */
+ if (!have_mtc) {
+ uint32_t ctc_lo, ctc_hi;
+
+ /* If we have not seen a TMA, we ignore this first MTC.
+ *
+ * We have no idea where in this MTC period tracing started.
+ * We could lose an entire MTC period or just a tiny fraction.
+ *
+ * On the other hand, if we assumed a previous MTC value, we
+ * might make just the same error.
+ */
+ if (!have_tma)
+ return 0;
+
+ /* The TMA's CTC value didn't provide enough bits - otherwise,
+ * we would have treated the TMA as an MTC.
+ */
+ if (last_ctc & ~pt_pl_tma_ctc_mask)
+ return -pte_internal;
+
+ /* Split this MTC's CTC value into low and high parts with
+ * respect to the bits provided by TMA.
+ */
+ ctc_lo = ctc & pt_pl_tma_ctc_mask;
+ ctc_hi = ctc & ~pt_pl_tma_ctc_mask;
+
+ /* We estimate the high-order CTC bits that are not provided by
+ * TMA based on the CTC bits provided by this MTC.
+ *
+ * We assume that no MTC packets were dropped around TMA. If
+ * there are, we might place the TSC into the wrong MTC period
+ * depending on how many CTC bits TMA provides and how many MTC
+ * packets were dropped.
+ *
+ * Note that we may underflow which results in more bits to be
+ * set than MTC packets may provide. Drop those extra bits.
+ */
+ if (ctc_lo < last_ctc) {
+ ctc_hi -= 1u << pt_pl_tma_ctc_bit_size;
+ ctc_hi &= pt_pl_mtc_mask << mtc_freq;
+ }
+
+ last_ctc |= ctc_hi;
+ }
+
+ errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
+ if (errcode < 0) {
+ time->lost_mtc += 1;
+ return errcode;
+ }
+
+ errcode = pt_time_ctc_fc(&tsc, ctc_delta, config);
+ if (errcode < 0)
+ return errcode;
+
+ base += tsc;
+ time->tsc = time->base = base;
+
+ return 0;
+}
+
+/* Adjust a CYC packet's payload spanning multiple MTC periods.
+ *
+ * CYC packets measure the Fast Counter since the last CYC(-eligible) packet.
+ * Depending on the CYC threshold, we may not get a CYC for each MTC, so a CYC
+ * period may overlap with or even span multiple MTC periods.
+ *
+ * We can't do much about the overlap case without examining all packets in
+ * the respective periods. We leave this as expected imprecision.
+ *
+ * If we find a CYC packet to span multiple MTC packets, though, we try to
+ * approximate the portion for the current MTC period by subtracting the
+ * estimated portion for previous MTC periods using calibration information.
+ *
+ * We only consider MTC. For the first CYC after TSC, the corresponding TMA
+ * will contain the Fast Counter at TSC.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_time_adjust_cyc(uint64_t *cyc, const struct pt_time *time,
+ const struct pt_config *config, uint64_t fcr)
+{
+ uint32_t last_ctc, ctc, ctc_delta;
+ uint64_t fc, total_cyc, old_cyc;
+ int errcode;
+
+ if (!time || !config || !fcr)
+ return -pte_internal;
+
+ last_ctc = time->ctc_cyc;
+ ctc = time->ctc;
+
+ /* There is nothing to do if this is the current MTC period. */
+ if (ctc == last_ctc)
+ return 0;
+
+ /* Calibration computes
+ *
+ * fc = (ctc_delta * cpuid[0x15].ebx) / cpuid[0x15].eax.
+ * fcr = (fc << pt_tcal_fcr_shr) / cyc
+ *
+ * So cyc = (fc << pt_tcal_fcr_shr) / fcr.
+ */
+
+ errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_time_ctc_fc(&fc, ctc_delta, config);
+ if (errcode < 0)
+ return errcode;
+
+ old_cyc = (fc << pt_tcal_fcr_shr) / fcr;
+ total_cyc = *cyc;
+
+ /* Make sure we don't wrap around. If we would, attribute the entire
+ * CYC payload to the previous MTC period(s).
+ *
+ * We lost an unknown portion of the CYC payload for the current MTC
+ * period, but it's usually better to run too slow than too fast.
+ */
+ if (total_cyc < old_cyc)
+ total_cyc = old_cyc;
+
+ *cyc = total_cyc - old_cyc;
+ return 0;
+}
+
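A worked instance of the fixed-point relation quoted in the comment above, cyc = (fc << pt_tcal_fcr_shr) / fcr. The shift of 8 and the FCR value of 0x280 (roughly 2.5 fast-counter ticks per cycle) are assumptions chosen for the example, not values taken from the library.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCR_SHR 8	/* illustrative stand-in for pt_tcal_fcr_shr */

    int main(void)
    {
        uint64_t fcr = 0x280ull; /* ~2.5 ticks per cycle in fixed point */
        uint64_t fc = 1000ull;   /* estimated ticks spent in previous MTC
                                  * periods (from the CTC delta)
                                  */
        uint64_t cyc = 500ull;   /* the CYC packet's payload */

        /* The inverse of the calibration formula:
         * fcr = (fc << FCR_SHR) / cyc  =>  cyc = (fc << FCR_SHR) / fcr.
         */
        uint64_t old_cyc = (fc << FCR_SHR) / fcr;

        /* Attribute everything to previous periods rather than wrap. */
        if (cyc < old_cyc)
            cyc = old_cyc;

        printf("cycles left for the current MTC period: %" PRIu64 "\n",
               cyc - old_cyc);
        return 0;
    }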
+int pt_time_update_cyc(struct pt_time *time,
+ const struct pt_packet_cyc *packet,
+ const struct pt_config *config, uint64_t fcr)
+{
+ uint64_t cyc, fc;
+
+ if (!time || !packet || !config)
+ return -pte_internal;
+
+ if (!fcr) {
+ time->lost_cyc += 1;
+ return 0;
+ }
+
+ cyc = packet->value;
+ fc = time->fc;
+ if (!fc) {
+ int errcode;
+
+ errcode = pt_time_adjust_cyc(&cyc, time, config, fcr);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ fc += (cyc * fcr) >> pt_tcal_fcr_shr;
+
+ time->fc = fc;
+ time->tsc = time->base + fc;
+
+ return 0;
+}
+
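The forward direction used by pt_time_update_cyc: the CYC payload is scaled by the calibration value, accumulated into the fast counter, and added to the TSC base. A tiny sketch with the same assumed shift and FCR as above.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCR_SHR 8	/* illustrative stand-in for pt_tcal_fcr_shr */

    int main(void)
    {
        uint64_t base = 0x1000ull; /* TSC estimate at the last MTC/TSC */
        uint64_t fc = 0ull;        /* fast-counter ticks since then */
        uint64_t fcr = 0x280ull;   /* ~2.5 ticks per cycle in fixed point */
        uint64_t cyc = 100ull;     /* a CYC packet's payload */

        fc += (cyc * fcr) >> FCR_SHR;

        printf("estimated tsc: 0x%" PRIx64 "\n", base + fc);
        return 0;
    }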
+void pt_tcal_init(struct pt_time_cal *tcal)
+{
+ if (!tcal)
+ return;
+
+ memset(tcal, 0, sizeof(*tcal));
+
+ tcal->min_fcr = UINT64_MAX;
+}
+
+static int pt_tcal_have_fcr(const struct pt_time_cal *tcal)
+{
+ if (!tcal)
+ return 0;
+
+ return (tcal->min_fcr <= tcal->max_fcr);
+}
+
+int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal)
+{
+ if (!fcr || !tcal)
+ return -pte_internal;
+
+ if (!pt_tcal_have_fcr(tcal))
+ return -pte_no_time;
+
+ *fcr = tcal->fcr;
+
+ return 0;
+}
+
+int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr)
+{
+ if (!tcal)
+ return -pte_internal;
+
+ tcal->fcr = fcr;
+
+ if (fcr < tcal->min_fcr)
+ tcal->min_fcr = fcr;
+
+ if (fcr > tcal->max_fcr)
+ tcal->max_fcr = fcr;
+
+ return 0;
+}
+
+int pt_tcal_update_tsc(struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!tcal || !packet)
+ return -pte_internal;
+
+ /* A TSC outside of PSB+ may indicate loss of time. We do not use it
+ * for calibration. We store the TSC value for calibration at the next
+ * TSC in PSB+, though.
+ */
+ tcal->tsc = packet->tsc;
+ tcal->cyc_tsc = 0ull;
+
+ return 0;
+}
+
+int pt_tcal_header_tsc(struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ uint64_t tsc, last_tsc, tsc_delta, cyc, fcr;
+
+ (void) config;
+
+ if (!tcal || !packet)
+ return -pte_internal;
+
+ last_tsc = tcal->tsc;
+ cyc = tcal->cyc_tsc;
+
+ tsc = packet->tsc;
+
+ tcal->tsc = tsc;
+ tcal->cyc_tsc = 0ull;
+
+ if (!last_tsc || !cyc)
+ return 0;
+
+ /* Correct a single wrap-around. */
+ if (tsc < last_tsc) {
+ tsc += 1ull << pt_pl_tsc_bit_size;
+
+ if (tsc < last_tsc)
+ return -pte_bad_packet;
+ }
+
+ tsc_delta = tsc - last_tsc;
+
+ /* We shift the numerator to improve rounding precision.
+ *
+ * Since we're only collecting the CYCs between two TSC packets, we
+ * shouldn't overflow. Let's rather fail than overflow.
+ */
+ if (tsc_delta & ~(~0ull >> pt_tcal_fcr_shr))
+ return -pte_internal;
+
+ fcr = (tsc_delta << pt_tcal_fcr_shr) / cyc;
+
+ return pt_tcal_set_fcr(tcal, fcr);
+}
+
+int pt_tcal_update_cbr(struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ /* A CBR outside of PSB+ indicates a frequency change. Reset our
+ * calibration state.
+ */
+ pt_tcal_init(tcal);
+
+ return pt_tcal_header_cbr(tcal, packet, config);
+}
+
+int pt_tcal_header_cbr(struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ uint64_t cbr, p1, fcr;
+
+ if (!tcal || !packet || !config)
+ return -pte_internal;
+
+ p1 = config->nom_freq;
+ if (!p1)
+ return 0;
+
+ /* If we know the nominal frequency, we can use it for calibration. */
+ cbr = packet->ratio;
+
+ fcr = (p1 << pt_tcal_fcr_shr) / cbr;
+
+ return pt_tcal_set_fcr(tcal, fcr);
+}
+
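pt_tcal_header_cbr derives the calibration value directly from the user-supplied nominal frequency and the core:bus ratio carried by the CBR packet. A small worked example, again with an assumed fixed-point shift of 8; the frequency and ratio values are made up for illustration.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCR_SHR 8	/* illustrative stand-in for pt_tcal_fcr_shr */

    int main(void)
    {
        uint64_t nom_freq = 40ull; /* config->nom_freq as supplied by the user */
        uint64_t cbr = 16ull;      /* core:bus ratio from the CBR packet */

        uint64_t fcr = (nom_freq << FCR_SHR) / cbr;

        printf("fcr = 0x%" PRIx64 " (%" PRIu64 ".%02" PRIu64 " ticks per cycle)\n",
               fcr, fcr >> FCR_SHR,
               ((fcr & ((1ull << FCR_SHR) - 1)) * 100) >> FCR_SHR);
        return 0;
    }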
+int pt_tcal_update_tma(struct pt_time_cal *tcal,
+ const struct pt_packet_tma *packet,
+ const struct pt_config *config)
+{
+ (void) tcal;
+ (void) packet;
+ (void) config;
+
+ /* Nothing to do. */
+ return 0;
+}
+
+int pt_tcal_update_mtc(struct pt_time_cal *tcal,
+ const struct pt_packet_mtc *packet,
+ const struct pt_config *config)
+{
+ uint32_t last_ctc, ctc, ctc_delta, have_mtc;
+ uint64_t cyc, fc, fcr;
+ int errcode;
+
+ if (!tcal || !packet || !config)
+ return -pte_internal;
+
+ last_ctc = tcal->ctc;
+ have_mtc = tcal->have_mtc;
+ cyc = tcal->cyc_mtc;
+
+ ctc = packet->ctc << config->mtc_freq;
+
+ /* We need at least two MTCs (including this one). */
+ if (!have_mtc) {
+ tcal->cyc_mtc = 0ull;
+ tcal->ctc = ctc;
+ tcal->have_mtc = 1;
+
+ return 0;
+ }
+
+ /* Without any cycles, we can't calibrate. Try again at the next
+ * MTC and distribute the cycles over the combined MTC period.
+ */
+ if (!cyc)
+ return 0;
+
+ /* Prepare for the next packet in case we error out below. */
+ tcal->have_mtc = 1;
+ tcal->cyc_mtc = 0ull;
+ tcal->ctc = ctc;
+
+ /* Let's pretend we will fail. We'll correct it at the end. */
+ tcal->lost_mtc += 1;
+
+ errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_time_ctc_fc(&fc, ctc_delta, config);
+ if (errcode < 0)
+ return errcode;
+
+ /* We shift the numerator to improve rounding precision.
+ *
+ * Since we're only collecting the CYCs between two MTC packets, we
+ * shouldn't overflow. Let's rather fail than overflow.
+ */
+ if (fc & ~(~0ull >> pt_tcal_fcr_shr))
+ return -pte_internal;
+
+ fcr = (fc << pt_tcal_fcr_shr) / cyc;
+
+ errcode = pt_tcal_set_fcr(tcal, fcr);
+ if (errcode < 0)
+ return errcode;
+
+ /* We updated the FCR. This recovers from previous MTC losses. */
+ tcal->lost_mtc = 0;
+
+ return 0;
+}
+
+int pt_tcal_update_cyc(struct pt_time_cal *tcal,
+ const struct pt_packet_cyc *packet,
+ const struct pt_config *config)
+{
+ uint64_t cyc;
+
+ (void) config;
+
+ if (!tcal || !packet)
+ return -pte_internal;
+
+ cyc = packet->value;
+ tcal->cyc_mtc += cyc;
+ tcal->cyc_tsc += cyc;
+
+ return 0;
+}
diff --git a/libipt/src/pt_tnt_cache.c b/libipt/src/pt_tnt_cache.c
new file mode 100644
index 000000000000..453663063c7f
--- /dev/null
+++ b/libipt/src/pt_tnt_cache.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_tnt_cache.h"
+
+#include "intel-pt.h"
+
+
+void pt_tnt_cache_init(struct pt_tnt_cache *cache)
+{
+ if (!cache)
+ return;
+
+ cache->tnt = 0ull;
+ cache->index = 0ull;
+}
+
+int pt_tnt_cache_is_empty(const struct pt_tnt_cache *cache)
+{
+ if (!cache)
+ return -pte_invalid;
+
+ return cache->index == 0;
+}
+
+int pt_tnt_cache_query(struct pt_tnt_cache *cache)
+{
+ int taken;
+
+ if (!cache)
+ return -pte_invalid;
+
+ if (!cache->index)
+ return -pte_bad_query;
+
+ taken = (cache->tnt & cache->index) != 0;
+ cache->index >>= 1;
+
+ return taken;
+}
+
+int pt_tnt_cache_update_tnt(struct pt_tnt_cache *cache,
+ const struct pt_packet_tnt *packet,
+ const struct pt_config *config)
+{
+ uint8_t bit_size;
+
+ (void) config;
+
+ if (!cache || !packet)
+ return -pte_invalid;
+
+ if (cache->index)
+ return -pte_bad_context;
+
+ bit_size = packet->bit_size;
+ if (!bit_size)
+ return -pte_bad_packet;
+
+ cache->tnt = packet->payload;
+ cache->index = 1ull << (bit_size - 1);
+
+ return 0;
+}
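A short usage sketch of the cache semantics above: a TNT payload is installed once and its bits are consumed most-significant first by successive queries. The mirror struct and helper below are illustrative only; the real interface is the pt_tnt_cache_* functions in this file.

    #include <stdint.h>
    #include <stdio.h>

    /* A minimal mirror of struct pt_tnt_cache, for illustration. */
    struct tnt_cache {
        uint64_t tnt;    /* the TNT payload bits */
        uint64_t index;  /* mask selecting the next bit to return */
    };

    static int tnt_query(struct tnt_cache *cache)
    {
        int taken;

        if (!cache->index)
            return -1; /* empty - the library returns -pte_bad_query */

        taken = (cache->tnt & cache->index) != 0;
        cache->index >>= 1;

        return taken;
    }

    int main(void)
    {
        /* A TNT packet carrying taken, not-taken, taken (bit_size == 3). */
        struct tnt_cache cache;
        int bit;

        cache.tnt = 0x5ull;            /* 0b101 */
        cache.index = 1ull << (3 - 1); /* start at the most significant bit */

        while ((bit = tnt_query(&cache)) >= 0)
            printf("conditional branch %s\n", bit ? "taken" : "not taken");

        return 0;
    }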
diff --git a/libipt/src/pt_version.c b/libipt/src/pt_version.c
new file mode 100644
index 000000000000..09d79573e911
--- /dev/null
+++ b/libipt/src/pt_version.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "intel-pt.h"
+
+
+struct pt_version pt_library_version(void)
+{
+ struct pt_version v = {
+ /* .major = */ PT_VERSION_MAJOR,
+ /* .minor = */ PT_VERSION_MINOR,
+ /* .reserved = */ 0,
+ /* .build = */ PT_VERSION_BUILD,
+ /* .ext = */ PT_VERSION_EXT
+ };
+
+ return v;
+}
diff --git a/libipt/src/windows/init.c b/libipt/src/windows/init.c
new file mode 100644
index 000000000000..f679be2746f8
--- /dev/null
+++ b/libipt/src/windows/init.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_ild.h"
+
+#include <windows.h>
+
+
+BOOLEAN WINAPI DllMain(HINSTANCE handle, DWORD reason, LPVOID reserved)
+{
+ (void) handle;
+ (void) reserved;
+
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+ /* Initialize the Intel(R) Processor Trace instruction decoder. */
+ pt_ild_init();
+ break;
+
+ default:
+ break;
+ }
+
+ return TRUE;
+}
diff --git a/libipt/src/windows/pt_cpuid.c b/libipt/src/windows/pt_cpuid.c
new file mode 100644
index 000000000000..40013b92fa51
--- /dev/null
+++ b/libipt/src/windows/pt_cpuid.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpuid.h"
+
+#include <intrin.h>
+
+extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ int cpu_info[4];
+
+ __cpuid(cpu_info, leaf);
+ *eax = cpu_info[0];
+ *ebx = cpu_info[1];
+ *ecx = cpu_info[2];
+ *edx = cpu_info[3];
+}
diff --git a/libipt/src/windows/pt_section_windows.c b/libipt/src/windows/pt_section_windows.c
new file mode 100644
index 000000000000..73b447a44daf
--- /dev/null
+++ b/libipt/src/windows/pt_section_windows.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_windows.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <io.h>
+
+
+static int pt_sec_windows_fstat(const char *filename, struct _stat *stat)
+{
+ int fd, errcode;
+
+ if (!filename || !stat)
+ return -pte_internal;
+
+ fd = _open(filename, _O_RDONLY);
+ if (fd == -1)
+ return -pte_bad_image;
+
+ errcode = _fstat(fd, stat);
+
+ _close(fd);
+
+ if (errcode)
+ return -pte_bad_image;
+
+ return 0;
+}
+
+int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename)
+{
+ struct pt_sec_windows_status *status;
+ struct _stat stat;
+ int errcode;
+
+ if (!pstatus || !psize)
+ return -pte_internal;
+
+ errcode = pt_sec_windows_fstat(filename, &stat);
+ if (errcode < 0)
+ return errcode;
+
+ if (stat.st_size < 0)
+ return -pte_bad_image;
+
+ status = malloc(sizeof(*status));
+ if (!status)
+ return -pte_nomem;
+
+ status->stat = stat;
+
+ *pstatus = status;
+ *psize = stat.st_size;
+
+ return 0;
+}
+
+static int check_file_status(struct pt_section *section, int fd)
+{
+ struct pt_sec_windows_status *status;
+ struct _stat stat;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = _fstat(fd, &stat);
+ if (errcode)
+ return -pte_bad_image;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ if (stat.st_size != status->stat.st_size)
+ return -pte_bad_image;
+
+ if (stat.st_mtime != status->stat.st_mtime)
+ return -pte_bad_image;
+
+ return 0;
+}
+
+static DWORD granularity(void)
+{
+ struct _SYSTEM_INFO sysinfo;
+
+ GetSystemInfo(&sysinfo);
+
+ return sysinfo.dwAllocationGranularity;
+}
+
+int pt_sec_windows_map(struct pt_section *section, int fd)
+{
+ struct pt_sec_windows_mapping *mapping;
+ uint64_t offset, size, adjustment;
+ HANDLE fh, mh;
+ DWORD dsize;
+ uint8_t *base;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ offset = section->offset;
+ size = section->size;
+
+ adjustment = offset % granularity();
+
+ offset -= adjustment;
+ size += adjustment;
+
+ /* The section is supposed to fit into the file, so we shouldn't
+ * see any overflows here.
+ */
+ if (size < section->size)
+ return -pte_internal;
+
+ dsize = (DWORD) size;
+ if ((uint64_t) dsize != size)
+ return -pte_internal;
+
+ fh = (HANDLE) _get_osfhandle(fd);
+
+ mh = CreateFileMapping(fh, NULL, PAGE_READONLY, 0, 0, NULL);
+ if (!mh)
+ return -pte_bad_image;
+
+ base = MapViewOfFile(mh, FILE_MAP_READ, (DWORD) (offset >> 32),
+ (DWORD) (uint32_t) offset, dsize);
+ if (!base) {
+ errcode = -pte_bad_image;
+ goto out_mh;
+ }
+
+ mapping = malloc(sizeof(*mapping));
+ if (!mapping) {
+ errcode = -pte_nomem;
+ goto out_map;
+ }
+
+ mapping->fd = fd;
+ mapping->mh = mh;
+ mapping->base = base;
+ mapping->begin = base + adjustment;
+ mapping->end = base + size;
+
+ section->mapping = mapping;
+ section->unmap = pt_sec_windows_unmap;
+ section->read = pt_sec_windows_read;
+ section->memsize = pt_sec_windows_memsize;
+
+ return 0;
+
+out_map:
+ UnmapViewOfFile(base);
+
+out_mh:
+ CloseHandle(mh);
+ return errcode;
+}
+
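pt_sec_windows_map rounds the section offset down to the system's allocation granularity, maps a correspondingly larger view, and has mapping->begin skip the adjustment again. A minimal sketch of that arithmetic with an assumed 64KiB granularity; the numbers are illustrative.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t granularity = 0x10000ull; /* assumed dwAllocationGranularity */
        uint64_t offset = 0x12345ull;            /* section offset within the file */
        uint64_t size = 0x800ull;                /* section size */

        /* File mapping views must start at a multiple of the allocation
         * granularity, so map from the rounded-down offset and enlarge
         * the view by the same amount.
         */
        uint64_t adjustment = offset % granularity;
        uint64_t map_offset = offset - adjustment;
        uint64_t map_size = size + adjustment;

        printf("map at 0x%" PRIx64 " for 0x%" PRIx64 " bytes; the section "
               "begins 0x%" PRIx64 " bytes into the view\n",
               map_offset, map_size, adjustment);
        return 0;
    }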
+static int pt_sec_windows_map_success(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_on_map(section);
+ if (status < 0) {
+ /* We had to release the section lock for pt_section_on_map() so
+ * @section may have meanwhile been mapped by other threads.
+ *
+ * We still want to return the error so we release our mapping.
+ * Our caller does not yet know whether pt_section_map()
+ * succeeded.
+ */
+ (void) pt_section_unmap(section);
+ return status;
+ }
+
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ const char *filename;
+ HANDLE fh;
+ FILE *file;
+ int fd, errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->mcount)
+ return pt_sec_windows_map_success(section);
+
+ if (section->mapping) {
+ errcode = -pte_internal;
+ goto out_unlock;
+ }
+
+ filename = section->filename;
+ if (!filename) {
+ errcode = -pte_internal;
+ goto out_unlock;
+ }
+
+ fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fh == INVALID_HANDLE_VALUE) {
+ /* We failed to open the file with read sharing only. Let's allow
+ * write sharing; maybe our user has the file open for writing.
+ *
+ * We will detect changes to the file via fstat().
+ */
+
+ fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_WRITE, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fh == INVALID_HANDLE_VALUE) {
+ errcode = -pte_bad_image;
+ goto out_unlock;
+ }
+ }
+
+ fd = _open_osfhandle((intptr_t) fh, _O_RDONLY);
+ if (fd == -1) {
+ errcode = -pte_bad_image;
+ goto out_fh;
+ }
+
+ errcode = check_file_status(section, fd);
+ if (errcode < 0) {
+ errcode = -pte_bad_image;
+ goto out_fd;
+ }
+
+ /* We leave the file open on success. It will be closed when the
+ * section is unmapped.
+ */
+ errcode = pt_sec_windows_map(section, fd);
+ if (!errcode)
+ return pt_sec_windows_map_success(section);
+
+ /* Fall back to file based sections - report the original error
+ * if we fail to convert the file descriptor.
+ */
+ file = _fdopen(fd, "rb");
+ if (!file) {
+ errcode = -pte_bad_image;
+ goto out_fd;
+ }
+
+ /* We need to keep the file open on success. It will be closed when
+ * the section is unmapped.
+ */
+ errcode = pt_sec_file_map(section, file);
+ if (!errcode)
+ return pt_sec_windows_map_success(section);
+
+ fclose(file);
+ goto out_unlock;
+
+out_fd:
+ _close(fd);
+ return errcode;
+
+out_fh:
+ CloseHandle(fh);
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
+
+int pt_sec_windows_unmap(struct pt_section *section)
+{
+ struct pt_sec_windows_mapping *mapping;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping || !section->unmap || !section->read || !section->memsize)
+ return -pte_internal;
+
+ section->mapping = NULL;
+ section->unmap = NULL;
+ section->read = NULL;
+ section->memsize = NULL;
+
+ UnmapViewOfFile(mapping->begin);
+ CloseHandle(mapping->mh);
+ _close(mapping->fd);
+ free(mapping);
+
+ return 0;
+}
+
+int pt_sec_windows_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct pt_sec_windows_mapping *mapping;
+ const uint8_t *begin;
+
+ if (!buffer || !section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ /* We already checked in pt_section_read() that the requested memory
+ * lies within the section's boundaries.
+ *
+ * And we checked that the entire section was mapped. There's no need
+ * to check for overflows again.
+ */
+ begin = mapping->begin + offset;
+
+ memcpy(buffer, begin, size);
+ return (int) size;
+}
+
+int pt_sec_windows_memsize(const struct pt_section *section, uint64_t *size)
+{
+ struct pt_sec_windows_mapping *mapping;
+ const uint8_t *begin, *end;
+
+ if (!section || !size)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ begin = mapping->base;
+ end = mapping->end;
+
+ if (!begin || !end || end < begin)
+ return -pte_internal;
+
+ *size = (uint64_t) (end - begin);
+
+ return 0;
+}
diff --git a/libipt/test/src/ptunit-asid.c b/libipt/test/src/ptunit-asid.c
new file mode 100644
index 000000000000..5622fa64f713
--- /dev/null
+++ b/libipt/test/src/ptunit-asid.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_asid.h"
+
+#include "intel-pt.h"
+
+#include <stddef.h>
+
+
+static struct ptunit_result from_user_null(void)
+{
+ struct pt_asid user;
+ int errcode;
+
+ pt_asid_init(&user);
+
+ errcode = pt_asid_from_user(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_from_user(NULL, &user);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_default(void)
+{
+ struct pt_asid asid;
+ int errcode;
+
+ errcode = pt_asid_from_user(&asid, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, pt_asid_no_cr3);
+ ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_small(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = sizeof(user.size);
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, pt_asid_no_cr3);
+ ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_big(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = sizeof(user) + 4;
+ user.cr3 = 0x4200ull;
+ user.vmcs = 0x23000ull;
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, 0x4200ull);
+ ptu_uint_eq(asid.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = sizeof(user);
+ user.cr3 = 0x4200ull;
+ user.vmcs = 0x23000ull;
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, 0x4200ull);
+ ptu_uint_eq(asid.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_cr3(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = offsetof(struct pt_asid, vmcs);
+ user.cr3 = 0x4200ull;
+ user.vmcs = 0x23000ull;
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, 0x4200ull);
+ ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_null(void)
+{
+ struct pt_asid asid;
+ int errcode;
+
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_to_user(NULL, NULL, sizeof(asid));
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_to_user(NULL, &asid, sizeof(asid));
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_too_small(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_to_user(&user, &asid, 0);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user.size) - 1);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_small(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user.size));
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, sizeof(user.size));
+ ptu_uint_eq(user.cr3, 0xccccccccccccccccull);
+ ptu_uint_eq(user.vmcs, 0xccccccccccccccccull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_big(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+ asid.cr3 = 0x4200ull;
+ asid.vmcs = 0x23000ull;
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user) + 8);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, sizeof(asid));
+ ptu_uint_eq(user.cr3, 0x4200ull);
+ ptu_uint_eq(user.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+ asid.cr3 = 0x4200ull;
+ asid.vmcs = 0x23000ull;
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user));
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, sizeof(asid));
+ ptu_uint_eq(user.cr3, 0x4200ull);
+ ptu_uint_eq(user.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_cr3(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+ asid.cr3 = 0x4200ull;
+
+ errcode = pt_asid_to_user(&user, &asid, offsetof(struct pt_asid, vmcs));
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, offsetof(struct pt_asid, vmcs));
+ ptu_uint_eq(user.cr3, 0x4200ull);
+ ptu_uint_eq(user.vmcs, 0xccccccccccccccccull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_null(void)
+{
+ struct pt_asid asid;
+ int errcode;
+
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_match(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_match(NULL, &asid);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_match(&asid, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_default(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ lhs.cr3 = 0x2300ull;
+ lhs.vmcs = 0x42000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ errcode = pt_asid_match(&rhs, &lhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_default_mixed(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ lhs.cr3 = 0x2300ull;
+ rhs.vmcs = 0x42000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ errcode = pt_asid_match(&rhs, &lhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_cr3(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.cr3 = 0x2300ull;
+ rhs.cr3 = 0x2300ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_vmcs(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.vmcs = 0x23000ull;
+ rhs.vmcs = 0x23000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.cr3 = 0x2300ull;
+ rhs.cr3 = 0x2300ull;
+ lhs.vmcs = 0x23000ull;
+ rhs.vmcs = 0x23000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_cr3_false(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.cr3 = 0x4200ull;
+ rhs.cr3 = 0x2300ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_vmcs_false(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.vmcs = 0x42000ull;
+ rhs.vmcs = 0x23000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, from_user_null);
+ ptu_run(suite, from_user_default);
+ ptu_run(suite, from_user_small);
+ ptu_run(suite, from_user_big);
+ ptu_run(suite, from_user);
+ ptu_run(suite, from_user_cr3);
+
+ ptu_run(suite, to_user_null);
+ ptu_run(suite, to_user_too_small);
+ ptu_run(suite, to_user_small);
+ ptu_run(suite, to_user_big);
+ ptu_run(suite, to_user);
+ ptu_run(suite, to_user_cr3);
+
+ ptu_run(suite, match_null);
+ ptu_run(suite, match_default);
+ ptu_run(suite, match_default_mixed);
+ ptu_run(suite, match_cr3);
+ ptu_run(suite, match_vmcs);
+ ptu_run(suite, match);
+ ptu_run(suite, match_cr3_false);
+ ptu_run(suite, match_vmcs_false);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-block_cache.c b/libipt/test/src/ptunit-block_cache.c
new file mode 100644
index 000000000000..8d9b8889b8ab
--- /dev/null
+++ b/libipt/test/src/ptunit-block_cache.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit_threads.h"
+
+#include "pt_block_cache.h"
+
+#include <string.h>
+
+
+/* A test fixture optionally providing a block cache and automatically freeing
+ * the cache.
+ */
+struct bcache_fixture {
+ /* Threading support. */
+ struct ptunit_thrd_fixture thrd;
+
+ /* The cache - it will be freed automatically. */
+ struct pt_block_cache *bcache;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct bcache_fixture *);
+ struct ptunit_result (*fini)(struct bcache_fixture *);
+};
+
+enum {
+ /* The number of entries in fixture-provided caches. */
+ bfix_nentries = 0x10000,
+
+#if defined(FEATURE_THREADS)
+
+ /* The number of additional threads to use for stress testing. */
+ bfix_threads = 3,
+
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The number of iterations in stress testing. */
+ bfix_iterations = 0x10
+};
+
+static struct ptunit_result cfix_init(struct bcache_fixture *bfix)
+{
+ ptu_test(ptunit_thrd_init, &bfix->thrd);
+
+ bfix->bcache = NULL;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bfix_init(struct bcache_fixture *bfix)
+{
+ ptu_test(cfix_init, bfix);
+
+ bfix->bcache = pt_bcache_alloc(bfix_nentries);
+ ptu_ptr(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bfix_fini(struct bcache_fixture *bfix)
+{
+ int thrd;
+
+ ptu_test(ptunit_thrd_fini, &bfix->thrd);
+
+ for (thrd = 0; thrd < bfix->thrd.nthreads; ++thrd)
+ ptu_int_eq(bfix->thrd.result[thrd], 0);
+
+ pt_bcache_free(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_entry_size(void)
+{
+ ptu_uint_eq(sizeof(struct pt_bcache_entry), sizeof(uint32_t));
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_size(void)
+{
+ ptu_uint_le(sizeof(struct pt_block_cache),
+ 2 * sizeof(struct pt_bcache_entry));
+
+ return ptu_passed();
+}
+
+static struct ptunit_result free_null(void)
+{
+ pt_bcache_free(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_null(void)
+{
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ memset(&bce, 0, sizeof(bce));
+
+ errcode = pt_bcache_add(NULL, 0ull, bce);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_null(void)
+{
+ struct pt_bcache_entry bce;
+ struct pt_block_cache bcache;
+ int errcode;
+
+ errcode = pt_bcache_lookup(&bce, NULL, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_bcache_lookup(NULL, &bcache, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(0x10000ull);
+ ptu_ptr(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc_min(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(1ull);
+ ptu_ptr(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc_too_big(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(UINT32_MAX + 1ull);
+ ptu_null(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc_zero(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(0ull);
+ ptu_null(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result initially_empty(struct bcache_fixture *bfix)
+{
+ uint64_t index;
+
+ for (index = 0; index < bfix_nentries; ++index) {
+ struct pt_bcache_entry bce;
+ int status;
+
+ memset(&bce, 0xff, sizeof(bce));
+
+ status = pt_bcache_lookup(&bce, bfix->bcache, index);
+ ptu_int_eq(status, 0);
+
+ status = pt_bce_is_valid(bce);
+ ptu_int_eq(status, 0);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_bad_index(struct bcache_fixture *bfix)
+{
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ memset(&bce, 0, sizeof(bce));
+
+ errcode = pt_bcache_add(bfix->bcache, bfix_nentries, bce);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_bad_index(struct bcache_fixture *bfix)
+{
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ errcode = pt_bcache_lookup(&bce, bfix->bcache, bfix_nentries);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add(struct bcache_fixture *bfix, uint64_t index)
+{
+ struct pt_bcache_entry bce, exp;
+ int errcode;
+
+ memset(&bce, 0xff, sizeof(bce));
+ memset(&exp, 0x00, sizeof(exp));
+
+ exp.ninsn = 1;
+ exp.displacement = 7;
+ exp.mode = ptem_64bit;
+ exp.qualifier = ptbq_decode;
+ exp.isize = 7;
+
+ errcode = pt_bcache_add(bfix->bcache, index, exp);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_bcache_lookup(&bce, bfix->bcache, index);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(bce.ninsn, exp.ninsn);
+ ptu_int_eq(bce.displacement, exp.displacement);
+ ptu_uint_eq(pt_bce_exec_mode(bce), pt_bce_exec_mode(exp));
+ ptu_uint_eq(pt_bce_qualifier(bce), pt_bce_qualifier(exp));
+ ptu_uint_eq(bce.isize, exp.isize);
+
+ return ptu_passed();
+}
+
+static int worker(void *arg)
+{
+ struct pt_bcache_entry exp;
+ struct pt_block_cache *bcache;
+ uint64_t iter, index;
+
+ bcache = arg;
+ if (!bcache)
+ return -pte_internal;
+
+ memset(&exp, 0x00, sizeof(exp));
+ exp.ninsn = 5;
+ exp.displacement = 28;
+ exp.mode = ptem_64bit;
+ exp.qualifier = ptbq_again;
+ exp.isize = 3;
+
+ for (index = 0; index < bfix_nentries; ++index) {
+ for (iter = 0; iter < bfix_iterations; ++iter) {
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ memset(&bce, 0xff, sizeof(bce));
+
+ errcode = pt_bcache_lookup(&bce, bcache, index);
+ if (errcode < 0)
+ return errcode;
+
+ if (!pt_bce_is_valid(bce)) {
+ errcode = pt_bcache_add(bcache, index, exp);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ errcode = pt_bcache_lookup(&bce, bcache, index);
+ if (errcode < 0)
+ return errcode;
+
+ if (!pt_bce_is_valid(bce))
+ return -pte_nosync;
+
+ if (bce.ninsn != exp.ninsn)
+ return -pte_nosync;
+
+ if (bce.displacement != exp.displacement)
+ return -pte_nosync;
+
+ if (pt_bce_exec_mode(bce) != pt_bce_exec_mode(exp))
+ return -pte_nosync;
+
+ if (pt_bce_qualifier(bce) != pt_bce_qualifier(exp))
+ return -pte_nosync;
+
+ if (bce.isize != exp.isize)
+ return -pte_nosync;
+ }
+ }
+
+ return 0;
+}
+
+static struct ptunit_result stress(struct bcache_fixture *bfix)
+{
+ int errcode;
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd;
+
+ for (thrd = 0; thrd < bfix_threads; ++thrd)
+ ptu_test(ptunit_thrd_create, &bfix->thrd, worker,
+ bfix->bcache);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ errcode = worker(bfix->bcache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct bcache_fixture bfix, cfix;
+ struct ptunit_suite suite;
+
+ bfix.init = bfix_init;
+ bfix.fini = bfix_fini;
+
+ cfix.init = cfix_init;
+ cfix.fini = bfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, bcache_entry_size);
+ ptu_run(suite, bcache_size);
+
+ ptu_run(suite, free_null);
+ ptu_run(suite, add_null);
+ ptu_run(suite, lookup_null);
+
+ ptu_run_f(suite, alloc, cfix);
+ ptu_run_f(suite, alloc_min, cfix);
+ ptu_run_f(suite, alloc_too_big, cfix);
+ ptu_run_f(suite, alloc_zero, cfix);
+
+ ptu_run_f(suite, initially_empty, bfix);
+
+ ptu_run_f(suite, add_bad_index, bfix);
+ ptu_run_f(suite, lookup_bad_index, bfix);
+
+ ptu_run_fp(suite, add, bfix, 0ull);
+ ptu_run_fp(suite, add, bfix, bfix_nentries - 1ull);
+ ptu_run_f(suite, stress, bfix);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-config.c b/libipt/test/src/ptunit-config.c
new file mode 100644
index 000000000000..a4332ec112f0
--- /dev/null
+++ b/libipt/test/src/ptunit-config.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <stddef.h>
+
+
+/* A global fake buffer to pacify static analyzers. */
+static uint8_t buffer[8];
+
+static struct ptunit_result from_user_null(void)
+{
+ struct pt_config config;
+ int errcode;
+
+ errcode = pt_config_from_user(NULL, &config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_config_from_user(&config, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_too_small(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ user.size = sizeof(config.size);
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_bad_buffer(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ pt_config_init(&user);
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ user.begin = buffer;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ user.begin = NULL;
+ user.end = buffer;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ user.begin = &buffer[1];
+ user.end = buffer;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ user.size = sizeof(user);
+ user.begin = buffer;
+ user.end = &buffer[sizeof(buffer)];
+ user.cpu.vendor = pcv_intel;
+ user.errata.bdm70 = 1;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(config.size, sizeof(config));
+ ptu_ptr_eq(config.begin, buffer);
+ ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]);
+ ptu_int_eq(config.cpu.vendor, pcv_intel);
+ ptu_uint_eq(config.errata.bdm70, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_small(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ memset(&config, 0xcd, sizeof(config));
+
+ user.size = offsetof(struct pt_config, cpu);
+ user.begin = buffer;
+ user.end = &buffer[sizeof(buffer)];
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(config.size, offsetof(struct pt_config, cpu));
+ ptu_ptr_eq(config.begin, buffer);
+ ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]);
+ ptu_int_eq(config.cpu.vendor, pcv_unknown);
+ ptu_uint_eq(config.errata.bdm70, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_big(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ user.size = sizeof(user) + 4;
+ user.begin = buffer;
+ user.end = &buffer[sizeof(buffer)];
+ user.cpu.vendor = pcv_intel;
+ user.errata.bdm70 = 1;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(config.size, sizeof(config));
+ ptu_ptr_eq(config.begin, buffer);
+ ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]);
+ ptu_int_eq(config.cpu.vendor, pcv_intel);
+ ptu_uint_eq(config.errata.bdm70, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result size(void)
+{
+ ptu_uint_eq(sizeof(struct pt_errata), 16 * 4);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_size(void)
+{
+ struct pt_conf_addr_filter conf;
+
+ ptu_uint_eq(sizeof(conf.config), 8);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_none(void)
+{
+ struct pt_config config;
+ uint8_t filter;
+
+ pt_config_init(&config);
+
+ ptu_uint_eq(config.addr_filter.config.addr_cfg, 0ull);
+
+ for (filter = 0; filter < 4; ++filter) {
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, filter);
+
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_0(void)
+{
+ struct pt_config config;
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+ uint8_t filter;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr0_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr0_a = 0xa000ull;
+ config.addr_filter.addr0_b = 0xb000ull;
+
+ ptu_uint_ne(config.addr_filter.config.addr_cfg, 0ull);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 0);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_filter);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, 0);
+ ptu_uint_eq(addr_a, 0xa000ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, 0);
+ ptu_uint_eq(addr_b, 0xb000ull);
+
+ for (filter = 1; filter < 4; ++filter) {
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, filter);
+
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_1_3(void)
+{
+ struct pt_config config;
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0xa000ull;
+ config.addr_filter.addr1_b = 0xb000ull;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x100a000ull;
+ config.addr_filter.addr3_b = 0x100b000ull;
+
+ ptu_uint_ne(config.addr_filter.config.addr_cfg, 0ull);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 0);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 1);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_filter);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, 1);
+ ptu_uint_eq(addr_a, 0xa000ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, 1);
+ ptu_uint_eq(addr_b, 0xb000ull);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 2);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 3);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_stop);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, 3);
+ ptu_uint_eq(addr_a, 0x100a000ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, 3);
+ ptu_uint_eq(addr_b, 0x100b000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_oob(uint8_t filter)
+{
+ struct pt_config config;
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ pt_config_init(&config);
+
+ memset(&config.addr_filter, 0xcc, sizeof(config.addr_filter));
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, filter);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, filter);
+ ptu_uint_eq(addr_a, 0ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, filter);
+ ptu_uint_eq(addr_b, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xa000);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xaf00);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb000);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10a000);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af00);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b000);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_out(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xfff);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb001);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x100fff);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b001);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_stop_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xa000);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xaf00);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb000);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10a000);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af00);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b000);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_stop_out(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xfff);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb001);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x100fff);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b001);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_out_stop_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0x100f00;
+ config.addr_filter.addr1_b = 0x10af00;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af01);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b000);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_in_stop_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0x100f00;
+ config.addr_filter.addr1_b = 0x10af00;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af00);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10a0ff);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, from_user_null);
+ ptu_run(suite, from_user_too_small);
+ ptu_run(suite, from_user_bad_buffer);
+ ptu_run(suite, from_user);
+ ptu_run(suite, from_user_small);
+ ptu_run(suite, from_user_big);
+ ptu_run(suite, size);
+
+ ptu_run(suite, addr_filter_size);
+ ptu_run(suite, addr_filter_none);
+ ptu_run(suite, addr_filter_0);
+ ptu_run(suite, addr_filter_1_3);
+ ptu_run_p(suite, addr_filter_oob, 255);
+ ptu_run_p(suite, addr_filter_oob, 8);
+
+ ptu_run(suite, addr_filter_ip_in);
+ ptu_run(suite, addr_filter_ip_out);
+ ptu_run(suite, addr_filter_stop_in);
+ ptu_run(suite, addr_filter_stop_out);
+ ptu_run(suite, addr_filter_ip_out_stop_in);
+ ptu_run(suite, addr_filter_ip_in_stop_in);
+
+ return ptunit_report(&suite);
+}
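Taken together, the addr_filter_* tests above pin down the accept/reject rule that pt_filter_addr_check() is expected to follow: FilterEn ranges have inclusive bounds and, if any are configured, the address must fall into at least one of them; TraceStop ranges always reject; and a stop range wins over an overlapping filter range. The sketch below restates that rule for reference. It is an illustrative reading of the test expectations, not libipt's implementation, and the type and helper names are made up.

#include <stdint.h>

enum addr_cfg { cfg_off = 0, cfg_filter, cfg_stop };

struct addr_range { enum addr_cfg cfg; uint64_t a, b; };

/* Accept/reject rule implied by the tests; not the library code. */
static int addr_check(const struct addr_range *ranges, int nranges,
		      uint64_t addr)
{
	int have_filter = 0, in_filter = 0, i;

	for (i = 0; i < nranges; ++i) {
		const struct addr_range *r = &ranges[i];

		switch (r->cfg) {
		case cfg_stop:
			/* TraceStop rejects, even inside a filter range
			 * (addr_filter_ip_in_stop_in expects 0).
			 */
			if (r->a <= addr && addr <= r->b)
				return 0;
			break;

		case cfg_filter:
			/* FilterEn bounds are inclusive: 0xa000 and 0xb000
			 * both pass in addr_filter_ip_in.
			 */
			have_filter = 1;
			if (r->a <= addr && addr <= r->b)
				in_filter = 1;
			break;

		default:
			break;
		}
	}

	/* Without filter ranges, anything outside the stop ranges passes
	 * (addr_filter_stop_out expects 1); with filter ranges, the address
	 * must hit at least one of them (addr_filter_ip_out expects 0).
	 */
	return have_filter ? in_filter : 1;
}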
diff --git a/libipt/test/src/ptunit-cpp.cpp b/libipt/test/src/ptunit-cpp.cpp
new file mode 100644
index 000000000000..dad41362a7a2
--- /dev/null
+++ b/libipt/test/src/ptunit-cpp.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "intel-pt.h"
+
+
+static struct ptunit_result init_packet_decoder(void)
+{
+ uint8_t buf[1];
+ struct pt_config config;
+ struct pt_packet_decoder *decoder;
+
+ pt_config_init(&config);
+ config.begin = buf;
+ config.end = buf + sizeof(buf);
+
+ decoder = pt_pkt_alloc_decoder(&config);
+ ptu_ptr(decoder);
+ pt_pkt_free_decoder(decoder);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_query_decoder(void)
+{
+ uint8_t buf[1];
+ struct pt_config config;
+ struct pt_query_decoder *query_decoder;
+
+ pt_config_init(&config);
+ config.begin = buf;
+ config.end = buf + sizeof(buf);
+
+ query_decoder = pt_qry_alloc_decoder(&config);
+ ptu_ptr(query_decoder);
+ pt_qry_free_decoder(query_decoder);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init_packet_decoder);
+ ptu_run(suite, init_query_decoder);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-cpu.c b/libipt/test/src/ptunit-cpu.c
new file mode 100644
index 000000000000..a82e0f51cea0
--- /dev/null
+++ b/libipt/test/src/ptunit-cpu.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_cpu.h"
+#include "pt_cpuid.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+
+
+void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx, uint32_t *ecx,
+ uint32_t *edx)
+{
+ (void) leaf;
+ (void) eax;
+ (void) ebx;
+ (void) ecx;
+ (void) edx;
+}
+
+
+static struct ptunit_result cpu_valid(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, "6/44/2");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 6);
+ ptu_uint_eq(cpu.model, 44);
+ ptu_uint_eq(cpu.stepping, 2);
+
+ error = pt_cpu_parse(&cpu, "0xf/0x2c/0xf");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 0xf);
+ ptu_uint_eq(cpu.model, 0x2c);
+ ptu_uint_eq(cpu.stepping, 0xf);
+
+ error = pt_cpu_parse(&cpu, "022/054/017");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 022);
+ ptu_uint_eq(cpu.model, 054);
+ ptu_uint_eq(cpu.stepping, 017);
+
+ error = pt_cpu_parse(&cpu, "6/44");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 6);
+ ptu_uint_eq(cpu.model, 44);
+ ptu_uint_eq(cpu.stepping, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cpu_null(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, NULL);
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(NULL, "");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(NULL, NULL);
+ ptu_int_eq(error, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cpu_incomplete(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, "");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6//2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "//");
+ ptu_int_eq(error, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cpu_invalid(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, "e/44/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/e/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/44/e");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "65536/44/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/256/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/44/256");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "-1/44/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/-1/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/44/-1");
+ ptu_int_eq(error, -pte_invalid);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, cpu_valid);
+ ptu_run(suite, cpu_null);
+ ptu_run(suite, cpu_incomplete);
+ ptu_run(suite, cpu_invalid);
+
+ return ptunit_report(&suite);
+}
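The cpu_* tests above document the string format that pt_cpu_parse() accepts: family/model[/stepping], with decimal, hexadecimal (0x), or octal (leading 0) components, a stepping that defaults to 0 when omitted, and -pte_invalid for anything malformed or out of range. As a hedged usage sketch, this is roughly how a tool could turn such a string into a decoder configuration; the wrapper function is hypothetical and not part of the library.

#include "pt_cpu.h"

#include "intel-pt.h"

#include <stdio.h>

/* Hypothetical helper: fill a pt_config from a "family/model[/stepping]"
 * string, e.g. "6/44/2", "0xf/0x2c/0xf", or "6/44".
 */
static int config_from_cpu_string(struct pt_config *config, const char *arg)
{
	int errcode;

	pt_config_init(config);

	errcode = pt_cpu_parse(&config->cpu, arg);
	if (errcode < 0) {
		fprintf(stderr, "bad cpu string '%s': %s\n", arg,
			pt_errstr(pt_errcode(errcode)));
		return errcode;
	}

	/* Enable the errata workarounds for the parsed processor. */
	return pt_cpu_errata(&config->errata, &config->cpu);
}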
diff --git a/libipt/test/src/ptunit-event_queue.c b/libipt/test/src/ptunit-event_queue.c
new file mode 100644
index 000000000000..41566e708069
--- /dev/null
+++ b/libipt/test/src/ptunit-event_queue.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_event_queue.h"
+
+
+/* A test fixture providing an initialized event queue. */
+struct evq_fixture {
+ /* The event queue. */
+ struct pt_event_queue evq;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct evq_fixture *);
+ struct ptunit_result (*fini)(struct evq_fixture *);
+};
+
+
+static struct ptunit_result efix_init(struct evq_fixture *efix)
+{
+ pt_evq_init(&efix->evq);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result efix_init_pending(struct evq_fixture *efix)
+{
+ struct pt_event *ev;
+ int evb;
+
+ pt_evq_init(&efix->evq);
+
+ for (evb = 0; evb < evb_max; ++evb) {
+ ev = pt_evq_enqueue(&efix->evq, (enum pt_event_binding) evb);
+ ptu_ptr(ev);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result standalone_null(void)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_standalone(NULL);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result standalone(struct evq_fixture *efix)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_standalone(&efix->evq);
+ ptu_ptr(ev);
+ ptu_uint_eq(ev->ip_suppressed, 0ul);
+ ptu_uint_eq(ev->status_update, 0ul);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result enqueue_null(enum pt_event_binding evb)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_enqueue(NULL, evb);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result dequeue_null(enum pt_event_binding evb)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_dequeue(NULL, evb);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result dequeue_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_dequeue(&efix->evq, evb);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int status;
+
+ status = pt_evq_empty(&efix->evq, evb);
+ ptu_int_gt(status, 0);
+
+ status = pt_evq_pending(&efix->evq, evb);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_pending(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int status;
+
+ status = pt_evq_empty(&efix->evq, evb);
+ ptu_int_eq(status, 0);
+
+ status = pt_evq_pending(&efix->evq, evb);
+ ptu_int_gt(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_others_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int other;
+
+ for (other = 0; other < evb_max; ++other) {
+ enum pt_event_binding ob;
+
+ ob = (enum pt_event_binding) other;
+ if (ob != evb)
+ ptu_test(evq_empty, efix, ob);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result enqueue_all_dequeue(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ size_t num)
+{
+ struct pt_event *in[evq_max], *out[evq_max];
+ size_t idx;
+
+ ptu_uint_le(num, evq_max - 2);
+
+ for (idx = 0; idx < num; ++idx) {
+ in[idx] = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in[idx]);
+ }
+
+ ptu_test(evq_pending, efix, evb);
+ ptu_test(evq_others_empty, efix, evb);
+
+ for (idx = 0; idx < num; ++idx) {
+ out[idx] = pt_evq_dequeue(&efix->evq, evb);
+ ptu_ptr_eq(out[idx], in[idx]);
+ }
+
+ ptu_test(evq_empty, efix, evb);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result enqueue_one_dequeue(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ size_t num)
+{
+ size_t idx;
+
+ for (idx = 0; idx < num; ++idx) {
+ struct pt_event *in, *out;
+
+ in = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in);
+
+ out = pt_evq_dequeue(&efix->evq, evb);
+ ptu_ptr_eq(out, in);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overflow(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ size_t num)
+{
+ struct pt_event *in[evq_max], *out[evq_max], *ev;
+ size_t idx;
+
+ ptu_uint_le(num, evq_max - 2);
+
+ for (idx = 0; idx < (evq_max - 2); ++idx) {
+ in[idx] = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in[idx]);
+ }
+
+ for (idx = 0; idx < num; ++idx) {
+ ev = pt_evq_enqueue(&efix->evq, evb);
+ ptu_null(ev);
+ }
+
+ for (idx = 0; idx < num; ++idx) {
+ out[idx] = pt_evq_dequeue(&efix->evq, evb);
+ ptu_ptr_eq(out[idx], in[idx]);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_null(enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_clear(NULL, evb);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_clear(&efix->evq, evb);
+ ptu_int_eq(errcode, 0);
+
+ ptu_test(evq_empty, efix, evb);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result empty_null(enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_empty(NULL, evb);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pending_null(enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_pending(NULL, evb);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null(enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_find(NULL, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_find(&efix->evq, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_none_evb(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ struct pt_event *ev;
+ size_t other;
+
+ for (other = 0; other < evb_max; ++other) {
+ enum pt_event_binding ob;
+
+ ob = (enum pt_event_binding) other;
+ if (ob != evb) {
+ ev = pt_evq_enqueue(&efix->evq, ob);
+ ptu_ptr(ev);
+
+ ev->type = evt;
+ }
+ }
+
+ ev = pt_evq_find(&efix->evq, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_enqueue_other(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt,
+ size_t num)
+{
+ enum pt_event_type ot;
+ struct pt_event *ev;
+ size_t other;
+
+ for (other = 0; other < num; ++other) {
+ ot = (enum pt_event_type) other;
+ if (ot != evt) {
+ ev = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(ev);
+
+ ev->type = ot;
+ }
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_none_evt(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt,
+ size_t num)
+{
+ struct pt_event *ev;
+
+ ptu_test(evq_enqueue_other, efix, evb, evt, num);
+
+ ev = pt_evq_find(&efix->evq, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt,
+ size_t before, size_t after)
+{
+ struct pt_event *in, *out;
+
+ ptu_test(evq_enqueue_other, efix, evb, evt, before);
+
+ in = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in);
+
+ in->type = evt;
+
+ ptu_test(evq_enqueue_other, efix, evb, evt, after);
+
+ out = pt_evq_find(&efix->evq, evb, evt);
+ ptu_ptr_eq(out, in);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct evq_fixture efix, pfix;
+ struct ptunit_suite suite;
+
+ efix.init = efix_init;
+ efix.fini = NULL;
+
+ pfix.init = efix_init_pending;
+ pfix.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, standalone_null);
+ ptu_run_f(suite, standalone, efix);
+
+ ptu_run_p(suite, enqueue_null, evb_psbend);
+ ptu_run_p(suite, enqueue_null, evb_tip);
+ ptu_run_p(suite, enqueue_null, evb_fup);
+
+ ptu_run_p(suite, dequeue_null, evb_psbend);
+ ptu_run_p(suite, dequeue_null, evb_tip);
+ ptu_run_p(suite, dequeue_null, evb_fup);
+
+ ptu_run_fp(suite, dequeue_empty, efix, evb_psbend);
+ ptu_run_fp(suite, dequeue_empty, efix, evb_tip);
+ ptu_run_fp(suite, dequeue_empty, efix, evb_fup);
+
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_psbend, 1);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_psbend, 2);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_tip, 1);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_tip, 3);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_fup, 1);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_fup, 4);
+
+ ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_psbend, evb_max * 2);
+ ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_tip, evb_max * 2);
+ ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_fup, evb_max * 2);
+
+ ptu_run_fp(suite, overflow, efix, evb_psbend, 1);
+ ptu_run_fp(suite, overflow, efix, evb_tip, 2);
+ ptu_run_fp(suite, overflow, efix, evb_fup, 3);
+
+ ptu_run_p(suite, clear_null, evb_psbend);
+ ptu_run_p(suite, clear_null, evb_tip);
+ ptu_run_p(suite, clear_null, evb_fup);
+
+ ptu_run_fp(suite, clear, efix, evb_psbend);
+ ptu_run_fp(suite, clear, pfix, evb_psbend);
+ ptu_run_fp(suite, clear, efix, evb_tip);
+ ptu_run_fp(suite, clear, pfix, evb_tip);
+ ptu_run_fp(suite, clear, efix, evb_fup);
+ ptu_run_fp(suite, clear, pfix, evb_fup);
+
+ ptu_run_p(suite, empty_null, evb_psbend);
+ ptu_run_p(suite, empty_null, evb_tip);
+ ptu_run_p(suite, empty_null, evb_fup);
+
+ ptu_run_p(suite, pending_null, evb_psbend);
+ ptu_run_p(suite, pending_null, evb_tip);
+ ptu_run_p(suite, pending_null, evb_fup);
+
+ ptu_run_p(suite, find_null, evb_psbend, ptev_enabled);
+ ptu_run_p(suite, find_null, evb_tip, ptev_disabled);
+ ptu_run_p(suite, find_null, evb_fup, ptev_paging);
+
+ ptu_run_fp(suite, find_empty, efix, evb_psbend, ptev_enabled);
+ ptu_run_fp(suite, find_empty, efix, evb_tip, ptev_disabled);
+ ptu_run_fp(suite, find_empty, efix, evb_fup, ptev_paging);
+
+ ptu_run_fp(suite, find_none_evb, efix, evb_psbend, ptev_enabled);
+ ptu_run_fp(suite, find_none_evb, efix, evb_tip, ptev_disabled);
+ ptu_run_fp(suite, find_none_evb, efix, evb_fup, ptev_paging);
+
+ ptu_run_fp(suite, find_none_evt, efix, evb_psbend, ptev_enabled, 3);
+ ptu_run_fp(suite, find_none_evt, efix, evb_tip, ptev_disabled, 4);
+ ptu_run_fp(suite, find_none_evt, efix, evb_fup, ptev_paging, 2);
+
+ ptu_run_fp(suite, find, efix, evb_psbend, ptev_enabled, 0, 3);
+ ptu_run_fp(suite, find, efix, evb_tip, ptev_disabled, 2, 0);
+ ptu_run_fp(suite, find, efix, evb_fup, ptev_paging, 1, 4);
+
+ return ptunit_report(&suite);
+}
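The queue tests above rely on a two-step pattern: pt_evq_enqueue() hands out a pointer into the queue's own storage (or NULL once the evq_max - 2 capacity shown in the overflow test is exhausted), the caller fills in the event through that pointer, and pt_evq_dequeue()/pt_evq_find() later return the same pointers in FIFO order. A minimal sketch of that pattern follows; the caller, the chosen error code, and the address are made up for illustration.

#include "pt_event_queue.h"

#include "intel-pt.h"

/* Hypothetical caller: defer an enable event until the next TIP packet. */
static int defer_enable_event(struct pt_event_queue *evq, uint64_t ip)
{
	struct pt_event *ev;

	ev = pt_evq_enqueue(evq, evb_tip);
	if (!ev)
		return -pte_nomem;	/* queue full, see the overflow test */

	/* The caller owns the slot until it is dequeued. */
	ev->type = ptev_enabled;
	ev->variant.enabled.ip = ip;

	return 0;
}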
diff --git a/libipt/test/src/ptunit-fetch.c b/libipt/test/src/ptunit-fetch.c
new file mode 100644
index 000000000000..64c2455ef36b
--- /dev/null
+++ b/libipt/test/src/ptunit-fetch.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_decoder_function.h"
+#include "pt_packet_decoder.h"
+#include "pt_query_decoder.h"
+#include "pt_encoder.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+/* A test fixture for decoder function fetch tests. */
+struct fetch_fixture {
+ /* The trace buffer. */
+ uint8_t buffer[1024];
+
+ /* A trace configuration. */
+ struct pt_config config;
+
+ /* A trace encoder. */
+ struct pt_encoder encoder;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct fetch_fixture *);
+ struct ptunit_result (*fini)(struct fetch_fixture *);
+};
+
+static struct ptunit_result ffix_init(struct fetch_fixture *ffix)
+{
+ memset(ffix->buffer, pt_opc_bad, sizeof(ffix->buffer));
+
+ memset(&ffix->config, 0, sizeof(ffix->config));
+ ffix->config.size = sizeof(ffix->config);
+ ffix->config.begin = ffix->buffer;
+ ffix->config.end = ffix->buffer + sizeof(ffix->buffer);
+
+ pt_encoder_init(&ffix->encoder, &ffix->config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ffix_fini(struct fetch_fixture *ffix)
+{
+ pt_encoder_fini(&ffix->encoder);
+
+ return ptu_passed();
+}
+
+
+static struct ptunit_result fetch_null(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(NULL, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_df_fetch(&dfun, NULL, &ffix->config);
+ ptu_int_eq(errcode, -pte_nosync);
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_empty(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.end, &ffix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_unknown(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ ffix->config.begin[0] = pt_opc_bad;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, &pt_decode_unknown);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_unknown_ext(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ ffix->config.begin[0] = pt_opc_ext;
+ ffix->config.begin[1] = pt_ext_bad;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, &pt_decode_unknown);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_unknown_ext2(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ ffix->config.begin[0] = pt_opc_ext;
+ ffix->config.begin[1] = pt_ext_ext2;
+ ffix->config.begin[2] = pt_ext2_bad;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, &pt_decode_unknown);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_packet(struct fetch_fixture *ffix,
+ const struct pt_packet *packet,
+ const struct pt_decoder_function *df)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_enc_next(&ffix->encoder, packet);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, df);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_type(struct fetch_fixture *ffix,
+ enum pt_packet_type type,
+ const struct pt_decoder_function *dfun)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = type;
+
+ ptu_test(fetch_packet, ffix, &packet, dfun);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_tnt_8(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_tnt_8;
+ packet.payload.tnt.bit_size = 1;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_tnt_8);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_mode_exec(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_exec;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_mode);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_mode_tsx(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_tsx;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_mode);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_exstop_ip(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_exstop;
+ packet.payload.exstop.ip = 1;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_exstop);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct fetch_fixture ffix;
+ struct ptunit_suite suite;
+
+ ffix.init = ffix_init;
+ ffix.fini = ffix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, fetch_null, ffix);
+ ptu_run_f(suite, fetch_empty, ffix);
+
+ ptu_run_f(suite, fetch_unknown, ffix);
+ ptu_run_f(suite, fetch_unknown_ext, ffix);
+ ptu_run_f(suite, fetch_unknown_ext2, ffix);
+
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pad, &pt_decode_pad);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_psb, &pt_decode_psb);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tip, &pt_decode_tip);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tnt_64, &pt_decode_tnt_64);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tip_pge, &pt_decode_tip_pge);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tip_pgd, &pt_decode_tip_pgd);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_fup, &pt_decode_fup);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pip, &pt_decode_pip);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_ovf, &pt_decode_ovf);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_psbend, &pt_decode_psbend);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tsc, &pt_decode_tsc);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_cbr, &pt_decode_cbr);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tma, &pt_decode_tma);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_mtc, &pt_decode_mtc);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_cyc, &pt_decode_cyc);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_stop, &pt_decode_stop);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_vmcs, &pt_decode_vmcs);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_mnt, &pt_decode_mnt);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_exstop, &pt_decode_exstop);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_mwait, &pt_decode_mwait);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pwre, &pt_decode_pwre);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pwrx, &pt_decode_pwrx);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_ptw, &pt_decode_ptw);
+
+ ptu_run_f(suite, fetch_tnt_8, ffix);
+ ptu_run_f(suite, fetch_mode_exec, ffix);
+ ptu_run_f(suite, fetch_mode_tsx, ffix);
+ ptu_run_f(suite, fetch_exstop_ip, ffix);
+
+ return ptunit_report(&suite);
+}
+
+
+/* Dummy decode functions to satisfy link dependencies.
+ *
+ * As a nice side-effect, we will know if we need to add more tests when
+ * adding new decoder functions.
+ */
+int pt_pkt_decode_unknown(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_unknown(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pad(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pad(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_psb(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psb(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tip(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tnt_8(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_8(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tnt_64(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_64(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tip_pge(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pge(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pgd(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_fup(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pip(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_ovf(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ovf(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mode(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_psbend(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psbend(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tsc(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_cbr(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tma(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tma(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mtc(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mtc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_cyc(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cyc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_stop(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_stop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_vmcs(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mnt(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_exstop(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_exstop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mwait(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mwait(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pwre(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwre(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pwrx(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwrx(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_ptw(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ptw(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
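Two things about this file are worth spelling out. First, pt_df_fetch() only selects a decoder function; the stubs above exist, as their comment says, to satisfy the link dependencies of the table it consults, and they return -pte_internal so that an accidental dispatch would surface as an error rather than succeed silently. Second, the intended use is fetch-then-dispatch, roughly as in the sketch below. The sketch is illustrative only: the .packet member name is assumed from the internal pt_decoder_function.h, and the function is not the library's packet decoder.

#include "pt_decoder_function.h"
#include "pt_packet_decoder.h"

#include "intel-pt.h"

/* Illustrative fetch-then-dispatch step; not the library's decode loop. */
static int decode_one(struct pt_packet_decoder *decoder, const uint8_t *pos,
		      const struct pt_config *config, struct pt_packet *packet)
{
	const struct pt_decoder_function *dfun;
	int errcode;

	errcode = pt_df_fetch(&dfun, pos, config);
	if (errcode < 0)
		return errcode;

	/* In the real library this would reach pt_pkt_decode_*(); linked
	 * against this test it would hit one of the stubs above.
	 */
	return dfun->packet(decoder, packet);
}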
diff --git a/libipt/test/src/ptunit-ild.c b/libipt/test/src/ptunit-ild.c
new file mode 100644
index 000000000000..88d3331ceb6f
--- /dev/null
+++ b/libipt/test/src/ptunit-ild.c
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_ild.h"
+
+#include <string.h>
+
+
+/* Check that an instruction is decoded correctly. */
+static struct ptunit_result ptunit_ild_decode(uint8_t *raw, uint8_t size,
+ enum pt_exec_mode mode)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ memcpy(insn.raw, raw, size);
+ insn.size = size;
+ insn.mode = mode;
+
+ errcode = pt_ild_decode(&insn, &iext);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(insn.size, size);
+ ptu_int_eq(insn.iclass, ptic_other);
+ ptu_int_eq(iext.iclass, PTI_INST_INVALID);
+
+ return ptu_passed();
+}
+
+/* Check that an instruction is decoded and classified correctly. */
+static struct ptunit_result ptunit_ild_classify(uint8_t *raw, uint8_t size,
+ enum pt_exec_mode mode,
+ pti_inst_enum_t iclass)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ memcpy(insn.raw, raw, size);
+ insn.size = size;
+ insn.mode = mode;
+
+ errcode = pt_ild_decode(&insn, &iext);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(insn.size, size);
+ ptu_int_eq(iext.iclass, iclass);
+
+ return ptu_passed();
+}
+
+/* Check that an invalid instruction is detected correctly.
+ *
+ * Note that we intentionally do not detect all invalid instructions. This test
+ * therefore only covers some that we care about.
+ */
+static struct ptunit_result ptunit_ild_invalid(uint8_t *raw, uint8_t size,
+ enum pt_exec_mode mode)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ memcpy(insn.raw, raw, size);
+ insn.size = size;
+ insn.mode = mode;
+
+ errcode = pt_ild_decode(&insn, &iext);
+ ptu_int_eq(errcode, -pte_bad_insn);
+
+ return ptu_passed();
+}
+
+
+/* Macros to automatically update the test location. */
+#define ptu_decode(insn, size, mode) \
+ ptu_check(ptunit_ild_decode, insn, size, mode)
+
+#define ptu_classify(insn, size, mode, iclass) \
+ ptu_check(ptunit_ild_classify, insn, size, mode, iclass)
+
+/* Macros to also automatically supply the instruction size. */
+#define ptu_decode_s(insn, mode) \
+ ptu_decode(insn, sizeof(insn), mode)
+
+#define ptu_classify_s(insn, mode, iclass) \
+ ptu_classify(insn, sizeof(insn), mode, iclass)
+
+#define ptu_invalid_s(insn, mode) \
+ ptu_check(ptunit_ild_invalid, insn, sizeof(insn), mode)
+
+
+static struct ptunit_result push(void)
+{
+ uint8_t insn[] = { 0x68, 0x11, 0x22, 0x33, 0x44 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_rel(void)
+{
+ uint8_t insn[] = { 0xE9, 0x60, 0xF9, 0xFF, 0xFF };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_JMP_E9);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result long_nop(void)
+{
+ uint8_t insn[] = { 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66, 0x2e, 0x0f,
+ 0x1f, 0x84, 0x00, 0x00,

+ 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_64(void)
+{
+ uint8_t insn[] = { 0x48, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
+ 0xff, 0x11 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_32_em64(void)
+{
+ uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
+ 0xff, 0x11 };
+
+ ptu_decode(insn, 6, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_32(void)
+{
+ uint8_t insn[] = { 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 5, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_32_em16(void)
+{
+ uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 6, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_16_em32(void)
+{
+ uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 4, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_16(void)
+{
+ uint8_t insn[] = { 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 3, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result rdtsc(void)
+{
+ uint8_t insn[] = { 0x0f, 0x31 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pcmpistri(void)
+{
+ uint8_t insn[] = { 0x66, 0x0f, 0x3a, 0x63, 0x04, 0x16, 0x1a };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmovdqa(void)
+{
+ uint8_t insn[] = { 0xc5, 0xf9, 0x6f, 0x25, 0xa9, 0x55, 0x04, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vpandn(void)
+{
+ uint8_t insn[] = { 0xc4, 0x41, 0x29, 0xdf, 0xd1 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result syscall(void)
+{
+ uint8_t insn[] = { 0x0f, 0x05 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSCALL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sysret(void)
+{
+ uint8_t insn[] = { 0x0f, 0x07 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSRET);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sysenter(void)
+{
+ uint8_t insn[] = { 0x0f, 0x34 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSENTER);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sysexit(void)
+{
+ uint8_t insn[] = { 0x0f, 0x35 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSEXIT);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result int3(void)
+{
+ uint8_t insn[] = { 0xcc };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_INT3);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result intn(void)
+{
+ uint8_t insn[] = { 0xcd, 0x06 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_INT);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result iret(void)
+{
+ uint8_t insn[] = { 0xcf };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_IRET);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result call_9a_cd(void)
+{
+ uint8_t insn[] = { 0x9a, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_CALL_9A);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result call_9a_cp(void)
+{
+ uint8_t insn[] = { 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_CALL_9A);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result call_ff_3(void)
+{
+ uint8_t insn[] = { 0xff, 0x1c, 0x25, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_CALL_FFr3);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_ff_5(void)
+{
+ uint8_t insn[] = { 0xff, 0x2c, 0x25, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_JMP_FFr5);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_ea_cd(void)
+{
+ uint8_t insn[] = { 0xea, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_JMP_EA);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_ea_cp(void)
+{
+ uint8_t insn[] = { 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_JMP_EA);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ret_ca(void)
+{
+ uint8_t insn[] = { 0xca, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_RET_CA);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmlaunch(void)
+{
+ uint8_t insn[] = { 0x0f, 0x01, 0xc2 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMLAUNCH);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmresume(void)
+{
+ uint8_t insn[] = { 0x0f, 0x01, 0xc3 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMRESUME);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmcall(void)
+{
+ uint8_t insn[] = { 0x0f, 0x01, 0xc1 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMCALL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmptrld(void)
+{
+ uint8_t insn[] = { 0x0f, 0xc7, 0x30 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMPTRLD);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jrcxz(void)
+{
+ uint8_t insn[] = { 0xe3, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_JrCXZ);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_eax_moffs64(void)
+{
+ uint8_t insn[] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_eax_moffs64_32(void)
+{
+ uint8_t insn[] = { 0x67, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_rax_moffs64(void)
+{
+ uint8_t insn[] = { 0x48, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_rax_moffs64_32(void)
+{
+ uint8_t insn[] = { 0x67, 0x48, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs64(void)
+{
+ uint8_t insn[] = { 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs64_32(void)
+{
+ uint8_t insn[] = { 0x67, 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_eax_moffs32(void)
+{
+ uint8_t insn[] = { 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs32(void)
+{
+ uint8_t insn[] = { 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs16(void)
+{
+ uint8_t insn[] = { 0xa1, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les(void)
+{
+ uint8_t insn[] = { 0xc4, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_disp16(void)
+{
+ uint8_t insn[] = { 0xc4, 0x06, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_disp32(void)
+{
+ uint8_t insn[] = { 0xc4, 0x05, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_ind_disp8(void)
+{
+ uint8_t insn[] = { 0xc4, 0x40, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_ind_disp16(void)
+{
+ uint8_t insn[] = { 0xc4, 0x80, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_ind_disp32(void)
+{
+ uint8_t insn[] = { 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds(void)
+{
+ uint8_t insn[] = { 0xc5, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_disp16(void)
+{
+ uint8_t insn[] = { 0xc5, 0x06, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_disp32(void)
+{
+ uint8_t insn[] = { 0xc5, 0x05, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_ind_disp8(void)
+{
+ uint8_t insn[] = { 0xc5, 0x40, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_ind_disp16(void)
+{
+ uint8_t insn[] = { 0xc5, 0x80, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_ind_disp32(void)
+{
+ uint8_t insn[] = { 0xc5, 0x80, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vpshufb(void)
+{
+ uint8_t insn[] = { 0x62, 0x02, 0x05, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bound(void)
+{
+ uint8_t insn[] = { 0x62, 0x02 };
+
+ ptu_decode_s(insn, ptem_32bit);
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evex_cutoff(void)
+{
+ uint8_t insn[] = { 0x62 };
+
+ ptu_invalid_s(insn, ptem_64bit);
+ ptu_invalid_s(insn, ptem_32bit);
+ ptu_invalid_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_r32(void)
+{
+ uint8_t insn[] = { 0xf3, 0x0f, 0xae, 0xe7 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_m32(void)
+{
+ uint8_t insn[] = { 0xf3, 0x0f, 0xae, 0x67, 0xcc };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_r64(void)
+{
+ uint8_t insn[] = { 0xf3, 0x48, 0x0f, 0xae, 0xe7 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_m64(void)
+{
+ uint8_t insn[] = { 0xf3, 0x48, 0x0f, 0xae, 0x67, 0xcc };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ pt_ild_init();
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, push);
+ ptu_run(suite, jmp_rel);
+ ptu_run(suite, long_nop);
+ ptu_run(suite, mov_al_64);
+ ptu_run(suite, mov_al_32);
+ ptu_run(suite, mov_al_32_em64);
+ ptu_run(suite, mov_al_32_em16);
+ ptu_run(suite, mov_al_16_em32);
+ ptu_run(suite, mov_al_16);
+ ptu_run(suite, rdtsc);
+ ptu_run(suite, pcmpistri);
+ ptu_run(suite, vmovdqa);
+ ptu_run(suite, vpandn);
+ ptu_run(suite, syscall);
+ ptu_run(suite, sysret);
+ ptu_run(suite, sysenter);
+ ptu_run(suite, sysexit);
+ ptu_run(suite, int3);
+ ptu_run(suite, intn);
+ ptu_run(suite, iret);
+ ptu_run(suite, call_9a_cd);
+ ptu_run(suite, call_9a_cp);
+ ptu_run(suite, call_ff_3);
+ ptu_run(suite, jmp_ff_5);
+ ptu_run(suite, jmp_ea_cd);
+ ptu_run(suite, jmp_ea_cp);
+ ptu_run(suite, ret_ca);
+ ptu_run(suite, vmlaunch);
+ ptu_run(suite, vmresume);
+ ptu_run(suite, vmcall);
+ ptu_run(suite, vmptrld);
+ ptu_run(suite, jrcxz);
+ ptu_run(suite, mov_eax_moffs64);
+ ptu_run(suite, mov_eax_moffs64_32);
+ ptu_run(suite, mov_rax_moffs64);
+ ptu_run(suite, mov_rax_moffs64_32);
+ ptu_run(suite, mov_ax_moffs64);
+ ptu_run(suite, mov_ax_moffs64_32);
+ ptu_run(suite, mov_eax_moffs32);
+ ptu_run(suite, mov_ax_moffs32);
+ ptu_run(suite, mov_ax_moffs16);
+ ptu_run(suite, les);
+ ptu_run(suite, les_disp16);
+ ptu_run(suite, les_disp32);
+ ptu_run(suite, les_ind_disp8);
+ ptu_run(suite, les_ind_disp16);
+ ptu_run(suite, les_ind_disp32);
+ ptu_run(suite, lds);
+ ptu_run(suite, lds_disp16);
+ ptu_run(suite, lds_disp32);
+ ptu_run(suite, lds_ind_disp8);
+ ptu_run(suite, lds_ind_disp16);
+ ptu_run(suite, lds_ind_disp32);
+ ptu_run(suite, vpshufb);
+ ptu_run(suite, bound);
+ ptu_run(suite, evex_cutoff);
+ ptu_run(suite, ptwrite_r32);
+ ptu_run(suite, ptwrite_m32);
+ ptu_run(suite, ptwrite_r64);
+ ptu_run(suite, ptwrite_m64);
+
+ return ptunit_report(&suite);
+}
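Extending this ILD suite follows a fixed recipe that is visible throughout the file: write the raw instruction bytes, pick the execution mode(s), assert either plain length decode (ptu_decode_s) or decode plus classification (ptu_classify_s), and register the test in main(). A hypothetical additional case, not part of the committed suite, would look like this:

/* Hypothetical extra case: CPUID (0f a2) is not a change-of-flow
 * instruction, so a plain length-decode check is enough.
 */
static struct ptunit_result cpuid(void)
{
	uint8_t insn[] = { 0x0f, 0xa2 };

	ptu_decode_s(insn, ptem_64bit);
	ptu_decode_s(insn, ptem_32bit);

	return ptu_passed();
}

It would then be registered alongside the others with ptu_run(suite, cpuid).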
diff --git a/libipt/test/src/ptunit-image.c b/libipt/test/src/ptunit-image.c
new file mode 100644
index 000000000000..f635dc366b27
--- /dev/null
+++ b/libipt/test/src/ptunit-image.c
@@ -0,0 +1,2286 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_image.h"
+#include "pt_section.h"
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+
+struct image_fixture;
+
+/* A test mapping. */
+struct ifix_mapping {
+ /* The contents. */
+ uint8_t content[0x10];
+
+ /* The size - between 0 and sizeof(content). */
+ uint64_t size;
+
+ /* An artificial error code to be injected into pt_section_read().
+ *
+ * If @errcode is non-zero, pt_section_read() fails with @errcode.
+ */
+ int errcode;
+};
+
+/* A test file status - turned into a section status. */
+struct ifix_status {
+ /* Delete indication:
+ * - zero if initialized and not (yet) deleted
+ * - non-zero if deleted and not (re-)initialized
+ */
+ int deleted;
+
+ /* Put with use-count of zero indication. */
+ int bad_put;
+
+ /* The test mapping to be used. */
+ struct ifix_mapping *mapping;
+
+ /* A link back to the test fixture providing this section. */
+ struct image_fixture *ifix;
+};
+
+enum {
+ ifix_nsecs = 5
+};
+
+/* A fake image section cache. */
+struct pt_image_section_cache {
+ /* The cached sections. */
+ struct pt_section *section[ifix_nsecs];
+
+ /* Their load addresses. */
+ uint64_t laddr[ifix_nsecs];
+
+ /* The number of used sections. */
+ int nsecs;
+};
+
+extern int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr,
+ int isid);
+
+
+/* A test fixture providing an image, test sections, and asids. */
+struct image_fixture {
+ /* The image. */
+ struct pt_image image;
+
+ /* The test states. */
+ struct ifix_status status[ifix_nsecs];
+
+ /* The test mappings. */
+ struct ifix_mapping mapping[ifix_nsecs];
+
+ /* The sections. */
+ struct pt_section section[ifix_nsecs];
+
+ /* The asids. */
+ struct pt_asid asid[3];
+
+ /* The number of used sections/mappings/states. */
+ int nsecs;
+
+ /* An initially empty image as destination for image copies. */
+ struct pt_image copy;
+
+ /* A test section cache. */
+ struct pt_image_section_cache iscache;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct image_fixture *);
+ struct ptunit_result (*fini)(struct image_fixture *);
+};
+
+static void ifix_init_section(struct pt_section *section, char *filename,
+ struct ifix_status *status,
+ struct ifix_mapping *mapping,
+ struct image_fixture *ifix)
+{
+ uint8_t i;
+
+ memset(section, 0, sizeof(*section));
+
+ section->filename = filename;
+ section->status = status;
+ section->size = mapping->size = sizeof(mapping->content);
+ section->offset = 0x10;
+
+ for (i = 0; i < mapping->size; ++i)
+ mapping->content[i] = i;
+
+ status->deleted = 0;
+ status->bad_put = 0;
+ status->mapping = mapping;
+ status->ifix = ifix;
+}
+
+static int ifix_add_section(struct image_fixture *ifix, char *filename)
+{
+ int index;
+
+ if (!ifix)
+ return -pte_internal;
+
+ index = ifix->nsecs;
+ if (ifix_nsecs <= index)
+ return -pte_internal;
+
+ ifix_init_section(&ifix->section[index], filename, &ifix->status[index],
+ &ifix->mapping[index], ifix);
+
+ ifix->nsecs += 1;
+ return index;
+}
+
+static int ifix_cache_section(struct image_fixture *ifix,
+ struct pt_section *section, uint64_t laddr)
+{
+ int index;
+
+ if (!ifix)
+ return -pte_internal;
+
+ index = ifix->iscache.nsecs;
+ if (ifix_nsecs <= index)
+ return -pte_internal;
+
+ ifix->iscache.section[index] = section;
+ ifix->iscache.laddr[index] = laddr;
+
+ index += 1;
+ ifix->iscache.nsecs = index;
+
+ return index;
+}
+
+const char *pt_section_filename(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->filename;
+}
+
+uint64_t pt_section_offset(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->offset;
+}
+
+uint64_t pt_section_size(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->size;
+}
+
+struct pt_section *pt_mk_section(const char *file, uint64_t offset,
+ uint64_t size)
+{
+ (void) file;
+ (void) offset;
+ (void) size;
+
+ /* This function is not used by our tests. */
+ return NULL;
+}
+
+int pt_section_get(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ section->ucount += 1;
+ return 0;
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ struct ifix_status *status;
+ uint16_t ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount) {
+ status->bad_put += 1;
+
+ return -pte_internal;
+ }
+
+ ucount = --section->ucount;
+ if (!ucount) {
+ status->deleted += 1;
+
+ if (status->deleted > 1)
+ return -pte_internal;
+ }
+
+ return 0;
+}
+
+int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr, int isid)
+{
+ if (!iscache || !section || !laddr)
+ return -pte_internal;
+
+ if (!isid || iscache->nsecs < isid)
+ return -pte_bad_image;
+
+ isid -= 1;
+
+ *section = iscache->section[isid];
+ *laddr = iscache->laddr[isid];
+
+ return pt_section_get(*section);
+}
+
+static int ifix_unmap(struct pt_section *section)
+{
+ uint16_t mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount;
+ if (!mcount)
+ return -pte_internal;
+
+ if (!section->mapping)
+ return -pte_internal;
+
+ mcount = --section->mcount;
+ if (!mcount)
+ section->mapping = NULL;
+
+ return 0;
+}
+
+static int ifix_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct ifix_mapping *mapping;
+ uint64_t begin, end;
+
+ if (!section || !buffer)
+ return -pte_internal;
+
+ begin = offset;
+ end = begin + size;
+
+ if (end < begin)
+ return -pte_nomap;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_nomap;
+
+ if (mapping->errcode)
+ return mapping->errcode;
+
+ if (mapping->size <= begin)
+ return -pte_nomap;
+
+ if (mapping->size < end) {
+ end = mapping->size;
+ size = (uint16_t) (end - begin);
+ }
+
+ memcpy(buffer, &mapping->content[begin], size);
+
+ return size;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ struct ifix_status *status;
+ uint16_t mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount++;
+ if (mcount)
+ return 0;
+
+ if (section->mapping)
+ return -pte_internal;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ section->mapping = status->mapping;
+ section->unmap = ifix_unmap;
+ section->read = ifix_read;
+
+ return 0;
+}
+
+int pt_section_on_map_lock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ if (!section->unmap)
+ return -pte_nomap;
+
+ return section->unmap(section);
+}
+
+int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ if (!section)
+ return -pte_internal;
+
+ if (!section->read)
+ return -pte_nomap;
+
+ return section->read(section, buffer, size, offset);
+}
+
+/* A test read memory callback. */
+static int image_readmem_callback(uint8_t *buffer, size_t size,
+ const struct pt_asid *asid,
+ uint64_t ip, void *context)
+{
+ const uint8_t *memory;
+ size_t idx;
+
+ (void) asid;
+
+ if (!buffer)
+ return -pte_invalid;
+
+ /* We use a constant offset of 0x3000. */
+ if (ip < 0x3000ull)
+ return -pte_nomap;
+
+ ip -= 0x3000ull;
+
+ memory = (const uint8_t *) context;
+ if (!memory)
+ return -pte_internal;
+
+ for (idx = 0; idx < size; ++idx)
+ buffer[idx] = memory[ip + idx];
+
+ return (int) idx;
+}
+
+static struct ptunit_result init(void)
+{
+ struct pt_image image;
+
+ memset(&image, 0xcd, sizeof(image));
+
+ pt_image_init(&image, NULL);
+ ptu_null(image.name);
+ ptu_null(image.sections);
+ ptu_null((void *) (uintptr_t) image.readmem.callback);
+ ptu_null(image.readmem.context);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_name(struct image_fixture *ifix)
+{
+ memset(&ifix->image, 0xcd, sizeof(ifix->image));
+
+ pt_image_init(&ifix->image, "image-name");
+ ptu_str_eq(ifix->image.name, "image-name");
+ ptu_null(ifix->image.sections);
+ ptu_null((void *) (uintptr_t) ifix->image.readmem.callback);
+ ptu_null(ifix->image.readmem.context);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_image_init(NULL, NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini(void)
+{
+ struct ifix_mapping mapping;
+ struct ifix_status status;
+ struct pt_section section;
+ struct pt_image image;
+ struct pt_asid asid;
+ int errcode;
+
+ pt_asid_init(&asid);
+ ifix_init_section(&section, NULL, &status, &mapping, NULL);
+
+ pt_image_init(&image, NULL);
+ errcode = pt_image_add(&image, &section, &asid, 0x0ull, 0);
+ ptu_int_eq(errcode, 0);
+
+ pt_image_fini(&image);
+ ptu_int_eq(section.ucount, 0);
+ ptu_int_eq(section.mcount, 0);
+ ptu_int_eq(status.deleted, 1);
+ ptu_int_eq(status.bad_put, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_empty(void)
+{
+ struct pt_image image;
+
+ pt_image_init(&image, NULL);
+ pt_image_fini(&image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_null(void)
+{
+ pt_image_fini(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name(struct image_fixture *ifix)
+{
+ const char *name;
+
+ pt_image_init(&ifix->image, "image-name");
+
+ name = pt_image_name(&ifix->image);
+ ptu_str_eq(name, "image-name");
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_none(void)
+{
+ struct pt_image image;
+ const char *name;
+
+ pt_image_init(&image, NULL);
+
+ name = pt_image_name(&image);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_null(void)
+{
+ const char *name;
+
+ name = pt_image_name(NULL);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_empty(struct image_fixture *ifix)
+{
+ struct pt_asid asid;
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ pt_asid_init(&asid);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &asid, 0x1000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_front(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1001ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1010ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ buffer[0] = 0xcc;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_back(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1001ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1010ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_multiple(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1010ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1008ull, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1007ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x07);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1017ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1018ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_mid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ ifix->section[1].size = 0x8;
+ ifix->mapping[1].size = 0x8;
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1004ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1004ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x100bull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x07);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x100cull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x0c);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result contained(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x8;
+ ifix->mapping[0].size = 0x8;
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result contained_multiple(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x2;
+ ifix->mapping[0].size = 0x2;
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1008ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1004ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x04);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result contained_back(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x8;
+ ifix->mapping[0].size = 0x8;
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x100cull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1004ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x04);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x100cull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0c);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1010ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x04);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result same(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result same_different_isid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result same_different_offset(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc }, i;
+ int status, isid, index;
+
+ /* Add another section from a different part of the same file as an
+ * existing section.
+ */
+ index = ifix_add_section(ifix, ifix->section[0].filename);
+ ptu_int_gt(index, 0);
+
+ ifix->section[index].offset = ifix->section[0].offset + 0x10;
+ ptu_uint_eq(ifix->section[index].size, ifix->section[0].size);
+
+ /* Change the content of the new section so we can distinguish them. */
+ for (i = 0; i < ifix->mapping[index].size; ++i)
+ ifix->mapping[index].content[i] += 0x10;
+
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 0);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[index],
+ &ifix->asid[0], 0x1000ull, 0);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 0);
+ ptu_uint_eq(buffer[0], 0x10);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 0);
+ ptu_uint_eq(buffer[0], 0x1f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result adjacent(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull - ifix->section[1].size, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0],
+ 0x1000ull + ifix->section[0].size, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0xfffull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0],
+ ifix->mapping[1].content[ifix->mapping[1].size - 1]);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull + ifix->section[0].size);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(struct image_fixture *ifix)
+{
+ uint8_t buffer;
+ int status, isid;
+
+ status = pt_image_read(NULL, &isid, &buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_read(&ifix->image, NULL, &buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_read(&ifix->image, &isid, NULL, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_read(&ifix->image, &isid, &buffer, 1, NULL,
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[1],
+ 0x1008ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1009ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x09);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[1],
+ 0x1009ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x2003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, NULL, 0x2003ull);
+ ptu_int_eq(status, -pte_internal);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_callback(struct image_fixture *ifix)
+{
+ uint8_t memory[] = { 0xdd, 0x01, 0x02, 0xdd };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_set_callback(&ifix->image, image_readmem_callback,
+ memory);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x3001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 0);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomem(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[1], 0x1010ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_truncated(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_error(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc };
+ int status, isid;
+
+ ifix->mapping[0].errcode = -pte_nosync;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_nosync);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_spurious_error(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ ifix->mapping[0].errcode = -pte_nosync;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1005ull);
+ ptu_int_eq(status, -pte_nosync);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x00);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_section(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove(&ifix->image, &ifix->section[0],
+ &ifix->asid[0], 0x1000ull);
+ ptu_int_eq(status, 0);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_bad_vaddr(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove(&ifix->image, &ifix->section[0],
+ &ifix->asid[0], 0x2000ull);
+ ptu_int_eq(status, -pte_bad_image);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2005ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x05);
+ ptu_uint_eq(buffer[1], 0x06);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_bad_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove(&ifix->image, &ifix->section[0],
+ &ifix->asid[1], 0x1000ull);
+ ptu_int_eq(status, -pte_bad_image);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2005ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x05);
+ ptu_uint_eq(buffer[1], 0x06);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_by_filename(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_filename(&ifix->image,
+ ifix->section[0].filename,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 1);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+remove_by_filename_bad_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_filename(&ifix->image,
+ ifix->section[0].filename,
+ &ifix->asid[1]);
+ ptu_int_eq(status, 0);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2005ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x05);
+ ptu_uint_eq(buffer[1], 0x06);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_none_by_filename(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_remove_by_filename(&ifix->image, "bad-name",
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_all_by_filename(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].filename = "same-name";
+ ifix->section[1].filename = "same-name";
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x2000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_filename(&ifix->image, "same-name",
+ &ifix->asid[0]);
+ ptu_int_eq(status, 2);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_ne(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x2003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_by_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_asid(&ifix->image, &ifix->asid[0]);
+ ptu_int_eq(status, 1);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_empty(struct image_fixture *ifix)
+{
+ struct pt_asid asid;
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ pt_asid_init(&asid);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, sizeof(buffer),
+ &asid, 0x1000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_self(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_copy(&ifix->image, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_shrink(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[1],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_split(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ ifix->section[1].size = 0x7;
+ ifix->mapping[1].size = 0x7;
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x2001ull, 2);
+ ptu_int_eq(status, 0);
+
+ ifix->section[2].size = 0x8;
+ ifix->mapping[2].size = 0x8;
+
+ status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0],
+ 0x2008ull, 3);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x02);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2009ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_merge(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[1].size = 0x8;
+ ifix->mapping[1].size = 0x8;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[0],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ ifix->section[2].size = 0x8;
+ ifix->mapping[2].size = 0x8;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[2], &ifix->asid[0],
+ 0x2008ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x2000ull, 3);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x200aull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0a);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_overlap(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[0],
+ 0x2010ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0],
+ 0x2008ull, 3);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x200aull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x02);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2016ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0e);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2019ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x09);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_replace(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x8;
+ ifix->mapping[0].size = 0x8;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_image image;
+ int status;
+
+ status = pt_image_add_cached(NULL, &iscache, 0, NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ status = pt_image_add_cached(&image, NULL, 0, NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid, risid;
+
+ isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ risid = -1;
+ status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(risid, isid);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_null_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid, risid;
+
+ isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid, NULL);
+ ptu_int_eq(status, 0);
+
+ risid = -1;
+ status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(risid, isid);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_twice(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid, risid;
+
+ isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ risid = -1;
+ status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(risid, isid);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_bad_isid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, 1,
+ &ifix->asid[0]);
+ ptu_int_eq(status, -pte_bad_image);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(NULL, &msec, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_find(&ifix->image, NULL, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_find(&ifix->image, &msec, NULL, 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[1], 0x2003ull);
+ ptu_int_eq(status, 11);
+ ptu_ptr_eq(msec.section, &ifix->section[1]);
+ ptu_uint_eq(msec.vaddr, 0x2000ull);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_asid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[1],
+ 0x1008ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1009ull);
+ ptu_int_eq(status, 1);
+ ptu_ptr_eq(msec.section, &ifix->section[0]);
+ ptu_uint_eq(msec.vaddr, 0x1000ull);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[1], 0x1009ull);
+ ptu_int_eq(status, 2);
+ ptu_ptr_eq(msec.section, &ifix->section[0]);
+ ptu_uint_eq(msec.vaddr, 0x1008ull);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_asid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x2003ull);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_nomem(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[1], 0x1010ull);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_null(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_validate(NULL, &msec, 0x1004ull, 10);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_validate(&ifix->image, NULL, 0x1004ull, 10);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_asid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.asid = ifix->asid[1];
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_vaddr(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.vaddr = 0x2000ull;
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_offset(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.offset = 0x8ull;
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_size(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.size = 0x8ull;
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_isid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid + 1);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ifix_init(struct image_fixture *ifix)
+{
+ int index;
+
+ pt_image_init(&ifix->image, NULL);
+ pt_image_init(&ifix->copy, NULL);
+
+ memset(ifix->status, 0, sizeof(ifix->status));
+ memset(ifix->mapping, 0, sizeof(ifix->mapping));
+ memset(ifix->section, 0, sizeof(ifix->section));
+ memset(&ifix->iscache, 0, sizeof(ifix->iscache));
+
+ ifix->nsecs = 0;
+
+ index = ifix_add_section(ifix, "file-0");
+ ptu_int_eq(index, 0);
+
+ index = ifix_add_section(ifix, "file-1");
+ ptu_int_eq(index, 1);
+
+ index = ifix_add_section(ifix, "file-2");
+ ptu_int_eq(index, 2);
+
+ pt_asid_init(&ifix->asid[0]);
+ ifix->asid[0].cr3 = 0xa000;
+
+ pt_asid_init(&ifix->asid[1]);
+ ifix->asid[1].cr3 = 0xb000;
+
+ pt_asid_init(&ifix->asid[2]);
+ ifix->asid[2].cr3 = 0xc000;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result rfix_init(struct image_fixture *ifix)
+{
+ int status;
+
+ ptu_check(ifix_init, ifix);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 10);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[1],
+ 0x2000ull, 11);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result dfix_fini(struct image_fixture *ifix)
+{
+ pt_image_fini(&ifix->image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ifix_fini(struct image_fixture *ifix)
+{
+ int sec;
+
+ ptu_check(dfix_fini, ifix);
+
+ pt_image_fini(&ifix->copy);
+
+ for (sec = 0; sec < ifix_nsecs; ++sec) {
+ ptu_int_eq(ifix->section[sec].ucount, 0);
+ ptu_int_eq(ifix->section[sec].mcount, 0);
+ ptu_int_le(ifix->status[sec].deleted, 1);
+ ptu_int_eq(ifix->status[sec].bad_put, 0);
+ }
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct image_fixture dfix, ifix, rfix;
+ struct ptunit_suite suite;
+
+ /* Dfix provides image destruction. */
+ dfix.init = NULL;
+ dfix.fini = dfix_fini;
+
+ /* Ifix provides an empty image. */
+ ifix.init = ifix_init;
+ ifix.fini = ifix_fini;
+
+ /* Rfix provides an image with two sections added. */
+ rfix.init = rfix_init;
+ rfix.fini = ifix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run_f(suite, init_name, dfix);
+ ptu_run(suite, init_null);
+
+ ptu_run(suite, fini);
+ ptu_run(suite, fini_empty);
+ ptu_run(suite, fini_null);
+
+ ptu_run_f(suite, name, dfix);
+ ptu_run(suite, name_none);
+ ptu_run(suite, name_null);
+
+ ptu_run_f(suite, read_empty, ifix);
+ ptu_run_f(suite, overlap_front, ifix);
+ ptu_run_f(suite, overlap_back, ifix);
+ ptu_run_f(suite, overlap_multiple, ifix);
+ ptu_run_f(suite, overlap_mid, ifix);
+ ptu_run_f(suite, contained, ifix);
+ ptu_run_f(suite, contained_multiple, ifix);
+ ptu_run_f(suite, contained_back, ifix);
+ ptu_run_f(suite, same, ifix);
+ ptu_run_f(suite, same_different_isid, ifix);
+ ptu_run_f(suite, same_different_offset, ifix);
+ ptu_run_f(suite, adjacent, ifix);
+
+ ptu_run_f(suite, read_null, rfix);
+ ptu_run_f(suite, read, rfix);
+ ptu_run_f(suite, read_null, rfix);
+ ptu_run_f(suite, read_asid, ifix);
+ ptu_run_f(suite, read_bad_asid, rfix);
+ ptu_run_f(suite, read_null_asid, rfix);
+ ptu_run_f(suite, read_callback, rfix);
+ ptu_run_f(suite, read_nomem, rfix);
+ ptu_run_f(suite, read_truncated, rfix);
+ ptu_run_f(suite, read_error, rfix);
+ ptu_run_f(suite, read_spurious_error, rfix);
+
+ ptu_run_f(suite, remove_section, rfix);
+ ptu_run_f(suite, remove_bad_vaddr, rfix);
+ ptu_run_f(suite, remove_bad_asid, rfix);
+ ptu_run_f(suite, remove_by_filename, rfix);
+ ptu_run_f(suite, remove_by_filename_bad_asid, rfix);
+ ptu_run_f(suite, remove_none_by_filename, rfix);
+ ptu_run_f(suite, remove_all_by_filename, ifix);
+ ptu_run_f(suite, remove_by_asid, rfix);
+
+ ptu_run_f(suite, copy_empty, ifix);
+ ptu_run_f(suite, copy, rfix);
+ ptu_run_f(suite, copy_self, rfix);
+ ptu_run_f(suite, copy_shrink, rfix);
+ ptu_run_f(suite, copy_split, ifix);
+ ptu_run_f(suite, copy_merge, ifix);
+ ptu_run_f(suite, copy_overlap, ifix);
+ ptu_run_f(suite, copy_replace, ifix);
+
+ ptu_run(suite, add_cached_null);
+ ptu_run_f(suite, add_cached, ifix);
+ ptu_run_f(suite, add_cached_null_asid, ifix);
+ ptu_run_f(suite, add_cached_twice, ifix);
+ ptu_run_f(suite, add_cached_bad_isid, ifix);
+
+ ptu_run_f(suite, find_null, rfix);
+ ptu_run_f(suite, find, rfix);
+ ptu_run_f(suite, find_asid, ifix);
+ ptu_run_f(suite, find_bad_asid, rfix);
+ ptu_run_f(suite, find_nomem, rfix);
+
+ ptu_run_f(suite, validate_null, rfix);
+ ptu_run_f(suite, validate, rfix);
+ ptu_run_f(suite, validate_bad_asid, rfix);
+ ptu_run_f(suite, validate_bad_vaddr, rfix);
+ ptu_run_f(suite, validate_bad_offset, rfix);
+ ptu_run_f(suite, validate_bad_size, rfix);
+ ptu_run_f(suite, validate_bad_isid, rfix);
+
+ return ptunit_report(&suite);
+}
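
The image tests above exercise the pt_image reader path: an image is initialized with pt_image_init(), sections are added at their load addresses per address space with pt_image_add(), reads are resolved through pt_image_read(), and everything is released with pt_image_fini(). A minimal usage sketch of that sequence — the file name, sizes, load address, and error-code choices are hypothetical, and pt_mk_section() here means the library's real constructor rather than the test stub above — might look like:

#include "pt_image.h"
#include "pt_section.h"

#include "intel-pt.h"

/* Map a hypothetical 0x1000-byte chunk of "binary" at 0x400000 and
 * read one byte from it.
 */
static int example_read(uint8_t *byte)
{
	struct pt_section *section;
	struct pt_image image;
	struct pt_asid asid;
	int status, isid;

	pt_image_init(&image, "example");
	pt_asid_init(&asid);

	section = pt_mk_section("binary", 0ull, 0x1000ull);
	if (!section) {
		pt_image_fini(&image);
		return -pte_invalid;
	}

	status = pt_image_add(&image, section, &asid, 0x400000ull, 1);

	/* On success the image holds its own reference; drop ours. */
	(void) pt_section_put(section);

	if (status >= 0) {
		isid = -1;
		status = pt_image_read(&image, &isid, byte, 1, &asid,
				       0x400000ull);
	}

	pt_image_fini(&image);
	return status;
}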
diff --git a/libipt/test/src/ptunit-image_section_cache.c b/libipt/test/src/ptunit-image_section_cache.c
new file mode 100644
index 000000000000..1b460b00d996
--- /dev/null
+++ b/libipt/test/src/ptunit-image_section_cache.c
@@ -0,0 +1,2027 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image_section_cache.h"
+
+#include "ptunit_threads.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
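+/* A minimal mock of struct pt_section.
+ *
+ * It stands in for the real section implementation so the image section
+ * cache can be tested in isolation.
+ */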
+struct pt_section {
+ /* The filename. We only support string literals for testing. */
+ const char *filename;
+
+ /* The file offset and size. */
+ uint64_t offset;
+ uint64_t size;
+
+ /* The bcache size. */
+ uint64_t bcsize;
+
+ /* The iscache back link. */
+ struct pt_image_section_cache *iscache;
+
+ /* The file content. */
+ uint8_t content[0x10];
+
+ /* The use count. */
+ int ucount;
+
+ /* The attach count. */
+ int acount;
+
+ /* The map count. */
+ int mcount;
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting this section. */
+ mtx_t lock;
+ /* A lock protecting the iscache and acount fields. */
+ mtx_t alock;
+#endif /* defined(FEATURE_THREADS) */
+};
+
+extern struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
+ uint64_t size);
+
+extern int pt_section_get(struct pt_section *section);
+extern int pt_section_put(struct pt_section *section);
+extern int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+extern int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+
+extern int pt_section_map(struct pt_section *section);
+extern int pt_section_map_share(struct pt_section *section);
+extern int pt_section_unmap(struct pt_section *section);
+extern int pt_section_request_bcache(struct pt_section *section);
+
+extern const char *pt_section_filename(const struct pt_section *section);
+extern uint64_t pt_section_offset(const struct pt_section *section);
+extern uint64_t pt_section_size(const struct pt_section *section);
+extern int pt_section_memsize(struct pt_section *section, uint64_t *size);
+
+extern int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+
+struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
+ uint64_t size)
+{
+ struct pt_section *section;
+
+ section = malloc(sizeof(*section));
+ if (section) {
+ uint8_t idx;
+
+ memset(section, 0, sizeof(*section));
+ section->filename = filename;
+ section->offset = offset;
+ section->size = size;
+ section->ucount = 1;
+
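+ /* Fill the fake content with its own indices so reads are easy to verify. */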
+ for (idx = 0; idx < sizeof(section->content); ++idx)
+ section->content[idx] = idx;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_init(&section->lock, mtx_plain);
+ if (errcode != thrd_success) {
+ free(section);
+ return NULL;
+ }
+
+ errcode = mtx_init(&section->alock, mtx_plain);
+ if (errcode != thrd_success) {
+ mtx_destroy(&section->lock);
+ free(section);
+ return NULL;
+ }
+ }
+#endif /* defined(FEATURE_THREADS) */
+ }
+
+ return section;
+}
+
+static int pt_section_lock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_unlock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_lock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_unlock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_section_get(struct pt_section *section)
+{
+ int errcode, ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = ++section->ucount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (!ucount)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ int errcode, ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = --section->ucount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (!ucount) {
+#if defined(FEATURE_THREADS)
+ mtx_destroy(&section->alock);
+ mtx_destroy(&section->lock);
+#endif /* defined(FEATURE_THREADS) */
+ free(section);
+ }
+
+ return 0;
+}
+
+int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ int errcode, ucount, acount;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = section->ucount;
+ acount = section->acount;
+ if (!acount) {
+ if (section->iscache || !ucount)
+ goto out_unlock;
+
+ section->iscache = iscache;
+ section->acount = 1;
+
+ return pt_section_unlock_attach(section);
+ }
+
+ acount += 1;
+ if (!acount) {
+ (void) pt_section_unlock_attach(section);
+ return -pte_overflow;
+ }
+
+ if (ucount < acount)
+ goto out_unlock;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ section->acount = acount;
+
+ return pt_section_unlock_attach(section);
+
+ out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ int errcode, ucount, acount;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ acount = section->acount;
+ if (!acount)
+ goto out_unlock;
+
+ acount -= 1;
+ ucount = section->ucount;
+ if (ucount < acount)
+ goto out_unlock;
+
+ section->acount = acount;
+ if (!acount)
+ section->iscache = NULL;
+
+ return pt_section_unlock_attach(section);
+
+ out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_map_share(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = 0;
+ iscache = section->iscache;
+ if (iscache)
+ status = pt_iscache_notify_map(iscache, section);
+
+ errcode = pt_section_unlock_attach(section);
+
+ return (status < 0) ? status : errcode;
+}
+
+int pt_section_map_share(struct pt_section *section)
+{
+ int errcode, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = ++section->mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (mcount <= 0)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ int errcode, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ section->bcsize = 0ull;
+ mcount = --section->mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (mcount < 0)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_request_bcache(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ uint64_t memsize;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ if (section->bcsize)
+ goto out_lock;
+
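+ /* The mock arbitrarily sizes the block cache at three times the section size. */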
+ section->bcsize = section->size * 3;
+ memsize = section->size + section->bcsize;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ iscache = section->iscache;
+ if (iscache) {
+ errcode = pt_iscache_notify_resize(iscache, section, memsize);
+ if (errcode < 0)
+ goto out_alock;
+ }
+
+ return pt_section_unlock_attach(section);
+
+out_lock:
+ (void) pt_section_unlock(section);
+
+out_alock:
+ (void) pt_section_unlock_attach(section);
+ return errcode;
+}
+
+const char *pt_section_filename(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->filename;
+}
+
+uint64_t pt_section_offset(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->offset;
+}
+
+uint64_t pt_section_size(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->size;
+}
+
+int pt_section_memsize(struct pt_section *section, uint64_t *size)
+{
+ if (!section || !size)
+ return -pte_internal;
+
+ *size = section->mcount ? section->size + section->bcsize : 0ull;
+
+ return 0;
+}
+
+int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ uint64_t begin, end, max;
+
+ if (!section || !buffer)
+ return -pte_internal;
+
+ begin = offset;
+ end = begin + size;
+ max = sizeof(section->content);
+
+ if (max <= begin)
+ return -pte_nomap;
+
+ if (max < end)
+ end = max;
+
+ if (end <= begin)
+ return -pte_invalid;
+
+ memcpy(buffer, &section->content[begin], (size_t) (end - begin));
+ return (int) (end - begin);
+}
+
+enum {
+ /* The number of test sections. */
+ num_sections = 8,
+
+#if defined(FEATURE_THREADS)
+
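+ /* The number of threads for the stress tests. */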
+ num_threads = 8,
+
+#endif /* defined(FEATURE_THREADS) */
+
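+ /* The number of iterations per stress-test worker. */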
+ num_iterations = 0x1000
+};
+
+struct iscache_fixture {
+ /* Threading support. */
+ struct ptunit_thrd_fixture thrd;
+
+ /* The image section cache under test. */
+ struct pt_image_section_cache iscache;
+
+ /* A bunch of test sections. */
+ struct pt_section *section[num_sections];
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct iscache_fixture *);
+ struct ptunit_result (*fini)(struct iscache_fixture *);
+};
+
+static struct ptunit_result dfix_init(struct iscache_fixture *cfix)
+{
+ int idx;
+
+ ptu_test(ptunit_thrd_init, &cfix->thrd);
+
+ memset(cfix->section, 0, sizeof(cfix->section));
+
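+ /* Create sections with varying offsets and sizes so not all of them compare equal. */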
+ for (idx = 0; idx < num_sections; ++idx) {
+ struct pt_section *section;
+
+ section = pt_mk_section("some-filename",
+ idx % 3 == 0 ? 0x1000 : 0x2000,
+ idx % 2 == 0 ? 0x1000 : 0x2000);
+ ptu_ptr(section);
+
+ cfix->section[idx] = section;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cfix_init(struct iscache_fixture *cfix)
+{
+ int errcode;
+
+ ptu_test(dfix_init, cfix);
+
+ errcode = pt_iscache_init(&cfix->iscache, NULL);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_init(struct iscache_fixture *cfix)
+{
+ int status, idx;
+
+ ptu_test(cfix_init, cfix);
+
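+ /* Pick a cache limit below the combined section sizes (0xc000) so mapping can trigger eviction. */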
+ cfix->iscache.limit = 0x7800;
+
+ for (idx = 0; idx < num_sections; ++idx) {
+ status = pt_iscache_add(&cfix->iscache, cfix->section[idx],
+ 0ull);
+ ptu_int_ge(status, 0);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cfix_fini(struct iscache_fixture *cfix)
+{
+ int idx, errcode;
+
+ ptu_test(ptunit_thrd_fini, &cfix->thrd);
+
+ for (idx = 0; idx < cfix->thrd.nthreads; ++idx)
+ ptu_int_eq(cfix->thrd.result[idx], 0);
+
+ pt_iscache_fini(&cfix->iscache);
+
+ for (idx = 0; idx < num_sections; ++idx) {
+ ptu_int_eq(cfix->section[idx]->ucount, 1);
+ ptu_int_eq(cfix->section[idx]->acount, 0);
+ ptu_int_eq(cfix->section[idx]->mcount, 0);
+ ptu_null(cfix->section[idx]->iscache);
+
+ errcode = pt_section_put(cfix->section[idx]);
+ ptu_int_eq(errcode, 0);
+ }
+
+ return ptu_passed();
+}
+
+
+static struct ptunit_result init_null(void)
+{
+ int errcode;
+
+ errcode = pt_iscache_init(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_null(void)
+{
+ pt_iscache_fini(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_null(void)
+{
+ const char *name;
+
+ name = pt_iscache_name(NULL);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section section;
+ int errcode;
+
+ errcode = pt_iscache_add(NULL, &section, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_iscache_add(&iscache, NULL, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null(void)
+{
+ int errcode;
+
+ errcode = pt_iscache_find(NULL, "filename", 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode;
+
+ errcode = pt_iscache_lookup(NULL, &section, &laddr, 0);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_iscache_lookup(&iscache, NULL, &laddr, 0);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_iscache_lookup(&iscache, &section, NULL, 0);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_null(void)
+{
+ int errcode;
+
+ errcode = pt_iscache_clear(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result free_null(void)
+{
+ pt_iscache_free(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_file_null(void)
+{
+ struct pt_image_section_cache iscache;
+ int errcode;
+
+ errcode = pt_iscache_add_file(NULL, "filename", 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_iscache_add_file(&iscache, NULL, 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(void)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t buffer;
+ int errcode;
+
+ errcode = pt_iscache_read(NULL, &buffer, sizeof(buffer), 1ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_iscache_read(&iscache, NULL, sizeof(buffer), 1ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_iscache_read(&iscache, &buffer, 0ull, 1, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_fini(struct iscache_fixture *cfix)
+{
+ (void) cfix;
+
+ /* The actual init and fini calls are in cfix_init() and cfix_fini(). */
+ return ptu_passed();
+}
+
+static struct ptunit_result name(struct iscache_fixture *cfix)
+{
+ const char *name;
+
+ pt_iscache_init(&cfix->iscache, "iscache-name");
+
+ name = pt_iscache_name(&cfix->iscache);
+ ptu_str_eq(name, "iscache-name");
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_none(struct iscache_fixture *cfix)
+{
+ const char *name;
+
+ pt_iscache_init(&cfix->iscache, NULL);
+
+ name = pt_iscache_name(&cfix->iscache);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add(struct iscache_fixture *cfix)
+{
+ int isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ /* The cache attaches and gets a reference on success. */
+ ptu_int_eq(cfix->section[0]->ucount, 2);
+ ptu_int_eq(cfix->section[0]->acount, 1);
+
+ /* The added section must be implicitly put in pt_iscache_fini. */
+ return ptu_passed();
+}
+
+static struct ptunit_result add_no_name(struct iscache_fixture *cfix)
+{
+ struct pt_section section;
+ int errcode;
+
+ memset(&section, 0, sizeof(section));
+
+ errcode = pt_iscache_add(&cfix->iscache, &section, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_file(struct iscache_fixture *cfix)
+{
+ int isid;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, isid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_empty(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_filename(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, "bad-filename",
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null_filename(struct iscache_fixture *cfix)
+{
+ int errcode;
+
+ errcode = pt_iscache_find(&cfix->iscache, NULL, 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_offset(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename, 0ull,
+ section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_size(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, 0ull, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_laddr(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 1ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(section, cfix->section[0]);
+ ptu_uint_eq(laddr, 0ull);
+
+ errcode = pt_section_put(section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_bad_isid(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, 0);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, -isid);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid + 1);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_empty(struct iscache_fixture *cfix)
+{
+ int errcode;
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_find(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int errcode, found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(errcode, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_lookup(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_twice(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* The second add should be ignored. */
+ ptu_int_eq(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_same(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ cfix->section[1]->offset = cfix->section[0]->offset;
+ cfix->section[1]->size = cfix->section[0]->size;
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* The second add should be ignored. */
+ ptu_int_eq(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_twice_different_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[0], 1ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ /* We attach twice and take two references - one for each entry. */
+ ptu_int_eq(cfix->section[0]->ucount, 3);
+ ptu_int_eq(cfix->section[0]->acount, 2);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_same_different_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ cfix->section[1]->offset = cfix->section[0]->offset;
+ cfix->section[1]->size = cfix->section[0]->size;
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 1ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_different_same_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_file_same(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* The second add should be ignored. */
+ ptu_int_eq(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_file_same_different_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 1ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_file_different_same_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 1ull, 1ull, 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 2ull, isid, 0xa008ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], 0x8);
+ ptu_uint_eq(buffer[1], 0x9);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_truncate(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, sizeof(buffer), isid,
+ 0xa00full);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(buffer[0], 0xf);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_vaddr(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 1ull, isid, 0xb000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_isid(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 1ull, isid + 1,
+ 0xa000ull);
+ ptu_int_eq(status, -pte_bad_image);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_read(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 2ull, isid, 0xa008ull);
+ ptu_int_eq(status, 2);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_nodup(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = 2 * cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_too_big(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size - 1;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_null(cfix->iscache.lru);
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_add_front(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[1]);
+ ptu_ptr(cfix->iscache.lru->next);
+ ptu_ptr_eq(cfix->iscache.lru->next->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next->next);
+ ptu_uint_eq(cfix->iscache.used,
+ cfix->section[0]->size + cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_move_front(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_ptr(cfix->iscache.lru->next);
+ ptu_ptr_eq(cfix->iscache.lru->next->section, cfix->section[1]);
+ ptu_null(cfix->iscache.lru->next->next);
+ ptu_uint_eq(cfix->iscache.used,
+ cfix->section[0]->size + cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_evict(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size +
+ cfix->section[1]->size - 1;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[1]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_bcache_evict(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = 4 * cfix->section[0]->size +
+ cfix->section[1]->size - 1;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_request_bcache(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, 4 * cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_bcache_clear(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_request_bcache(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_null(cfix->iscache.lru);
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_limit_evict(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_iscache_set_limit(&cfix->iscache,
+ cfix->section[0]->size +
+ cfix->section[1]->size - 1);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[1]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_clear(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(status, 0);
+
+ ptu_null(cfix->iscache.lru);
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+
+ return ptu_passed();
+}
+
+static int worker_add(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t laddr;
+ int sec;
+
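+ /* Cycle through a small set of load addresses so both new and duplicate entries get added. */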
+ laddr = 0x1000ull * (it % 23);
+
+ for (sec = 0; sec < num_sections; ++sec) {
+ struct pt_section *section;
+ uint64_t addr;
+ int isid, errcode;
+
+ isid = pt_iscache_add(&cfix->iscache,
+ cfix->section[sec], laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section,
+ &addr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (laddr != addr)
+ return -pte_noip;
+
+ /* We may not get the very section we added, but the
+ * section we get must have similar attributes.
+ *
+ * We're using the same filename string literal for all
+ * sections, though.
+ */
+ if (section->offset != cfix->section[sec]->offset)
+ return -pte_bad_image;
+
+ if (section->size != cfix->section[sec]->size)
+ return -pte_bad_image;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_add_file(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t offset, size, laddr;
+ int sec;
+
+ offset = it % 7 == 0 ? 0x1000 : 0x2000;
+ size = it % 5 == 0 ? 0x1000 : 0x2000;
+ laddr = it % 3 == 0 ? 0x1000 : 0x2000;
+
+ for (sec = 0; sec < num_sections; ++sec) {
+ struct pt_section *section;
+ uint64_t addr;
+ int isid, errcode;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name",
+ offset, size, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section,
+ &addr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (laddr != addr)
+ return -pte_noip;
+
+ if (section->offset != offset)
+ return -pte_bad_image;
+
+ if (section->size != size)
+ return -pte_bad_image;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_map(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it, sec, status;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ for (sec = 0; sec < num_sections; ++sec) {
+
+ status = pt_section_map(cfix->section[sec]);
+ if (status < 0)
+ return status;
+
+ status = pt_section_unmap(cfix->section[sec]);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_map_limit(void *arg)
+{
+ struct iscache_fixture *cfix;
+ uint64_t limits[] = { 0x8000, 0x3000, 0x12000, 0x0 }, limit;
+ int it, sec, errcode, lim;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ lim = 0;
+ for (it = 0; it < num_iterations; ++it) {
+ for (sec = 0; sec < num_sections; ++sec) {
+
+ errcode = pt_section_map(cfix->section[sec]);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_unmap(cfix->section[sec]);
+ if (errcode < 0)
+ return errcode;
+ }
+
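+ /* Occasionally change the cache limit, including setting it to zero, while workers keep mapping sections. */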
+ if (it % 23 != 0)
+ continue;
+
+ limit = limits[lim++];
+ lim %= sizeof(limits) / sizeof(*limits);
+
+ errcode = pt_iscache_set_limit(&cfix->iscache, limit);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_map_bcache(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it, sec, status;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ for (sec = 0; sec < num_sections; ++sec) {
+ struct pt_section *section;
+
+ section = cfix->section[sec];
+
+ status = pt_section_map(section);
+ if (status < 0)
+ return status;
+
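+ /* Occasionally request a block cache to trigger a resize notification. */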
+ if (it % 13 == 0) {
+ status = pt_section_request_bcache(section);
+ if (status < 0) {
+ (void) pt_section_unmap(section);
+ return status;
+ }
+ }
+
+ status = pt_section_unmap(section);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_add_map(void *arg)
+{
+ struct iscache_fixture *cfix;
+ struct pt_section *section;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ section = cfix->section[0];
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t laddr;
+ int isid, errcode;
+
+ laddr = (uint64_t) it << 3;
+
+ isid = pt_iscache_add(&cfix->iscache, section, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_add_clear(void *arg)
+{
+ struct iscache_fixture *cfix;
+ struct pt_section *section;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ section = cfix->section[0];
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t laddr;
+ int isid, errcode;
+
+ laddr = (uint64_t) it << 3;
+
+ isid = pt_iscache_add(&cfix->iscache, section, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_add_file_map(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ struct pt_section *section;
+ uint64_t offset, size, laddr, addr;
+ int isid, errcode;
+
+ offset = it % 7 < 4 ? 0x1000 : 0x2000;
+ size = it % 5 < 3 ? 0x1000 : 0x2000;
+ laddr = it % 3 < 2 ? 0x1000 : 0x2000;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name",
+ offset, size, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section,
+ &addr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (addr != laddr)
+ return -pte_internal;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_add_file_clear(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t offset, size, laddr;
+ int isid, errcode;
+
+ offset = it % 7 < 4 ? 0x1000 : 0x2000;
+ size = it % 5 < 3 ? 0x1000 : 0x2000;
+ laddr = it % 3 < 2 ? 0x1000 : 0x2000;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name",
+ offset, size, laddr);
+ if (isid < 0)
+ return isid;
+
+ if (it % 11 < 9)
+ continue;
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static struct ptunit_result stress(struct iscache_fixture *cfix,
+ int (*worker)(void *))
+{
+ int errcode;
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd;
+
+ for (thrd = 0; thrd < num_threads; ++thrd)
+ ptu_test(ptunit_thrd_create, &cfix->thrd, worker, cfix);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
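+ /* Also run the worker on the main thread; in builds without threading support this is the only worker. */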
+ errcode = worker(cfix);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct iscache_fixture cfix, dfix, sfix;
+ struct ptunit_suite suite;
+
+ cfix.init = cfix_init;
+ cfix.fini = cfix_fini;
+
+ dfix.init = dfix_init;
+ dfix.fini = cfix_fini;
+
+ sfix.init = sfix_init;
+ sfix.fini = cfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init_null);
+ ptu_run(suite, fini_null);
+ ptu_run(suite, name_null);
+ ptu_run(suite, add_null);
+ ptu_run(suite, find_null);
+ ptu_run(suite, lookup_null);
+ ptu_run(suite, clear_null);
+ ptu_run(suite, free_null);
+ ptu_run(suite, add_file_null);
+ ptu_run(suite, read_null);
+
+ ptu_run_f(suite, name, dfix);
+ ptu_run_f(suite, name_none, dfix);
+
+ ptu_run_f(suite, init_fini, cfix);
+ ptu_run_f(suite, add, cfix);
+ ptu_run_f(suite, add_no_name, cfix);
+ ptu_run_f(suite, add_file, cfix);
+
+ ptu_run_f(suite, find, cfix);
+ ptu_run_f(suite, find_empty, cfix);
+ ptu_run_f(suite, find_bad_filename, cfix);
+ ptu_run_f(suite, find_null_filename, cfix);
+ ptu_run_f(suite, find_bad_offset, cfix);
+ ptu_run_f(suite, find_bad_size, cfix);
+ ptu_run_f(suite, find_bad_laddr, cfix);
+
+ ptu_run_f(suite, lookup, cfix);
+ ptu_run_f(suite, lookup_bad_isid, cfix);
+
+ ptu_run_f(suite, clear_empty, cfix);
+ ptu_run_f(suite, clear_find, cfix);
+ ptu_run_f(suite, clear_lookup, cfix);
+
+ ptu_run_f(suite, add_twice, cfix);
+ ptu_run_f(suite, add_same, cfix);
+ ptu_run_f(suite, add_twice_different_laddr, cfix);
+ ptu_run_f(suite, add_same_different_laddr, cfix);
+ ptu_run_f(suite, add_different_same_laddr, cfix);
+
+ ptu_run_f(suite, add_file_same, cfix);
+ ptu_run_f(suite, add_file_same_different_laddr, cfix);
+ ptu_run_f(suite, add_file_different_same_laddr, cfix);
+
+ ptu_run_f(suite, read, cfix);
+ ptu_run_f(suite, read_truncate, cfix);
+ ptu_run_f(suite, read_bad_vaddr, cfix);
+ ptu_run_f(suite, read_bad_isid, cfix);
+
+ ptu_run_f(suite, lru_map, cfix);
+ ptu_run_f(suite, lru_read, cfix);
+ ptu_run_f(suite, lru_map_nodup, cfix);
+ ptu_run_f(suite, lru_map_too_big, cfix);
+ ptu_run_f(suite, lru_map_add_front, cfix);
+ ptu_run_f(suite, lru_map_move_front, cfix);
+ ptu_run_f(suite, lru_map_evict, cfix);
+ ptu_run_f(suite, lru_limit_evict, cfix);
+ ptu_run_f(suite, lru_bcache_evict, cfix);
+ ptu_run_f(suite, lru_bcache_clear, cfix);
+ ptu_run_f(suite, lru_clear, cfix);
+
+ ptu_run_fp(suite, stress, cfix, worker_add);
+ ptu_run_fp(suite, stress, cfix, worker_add_file);
+ ptu_run_fp(suite, stress, sfix, worker_map);
+ ptu_run_fp(suite, stress, sfix, worker_map_limit);
+ ptu_run_fp(suite, stress, sfix, worker_map_bcache);
+ ptu_run_fp(suite, stress, cfix, worker_add_map);
+ ptu_run_fp(suite, stress, cfix, worker_add_clear);
+ ptu_run_fp(suite, stress, cfix, worker_add_file_map);
+ ptu_run_fp(suite, stress, cfix, worker_add_file_clear);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-last_ip.c b/libipt/test/src/ptunit-last_ip.c
new file mode 100644
index 000000000000..bac47a33c3ed
--- /dev/null
+++ b/libipt/test/src/ptunit-last_ip.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_last_ip.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+static struct ptunit_result init(void)
+{
+ struct pt_last_ip last_ip;
+
+ memset(&last_ip, 0xcd, sizeof(last_ip));
+
+ pt_last_ip_init(&last_ip);
+
+ ptu_uint_eq(last_ip.ip, 0ull);
+ ptu_uint_eq(last_ip.have_ip, 0);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_last_ip_init(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_initial(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ pt_last_ip_init(&last_ip);
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_null(void)
+{
+ int errcode;
+
+ errcode = pt_last_ip_query(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_noip(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.have_ip = 0;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_suppressed(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 1;
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_initial(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip;
+ int errcode;
+
+ pt_last_ip_init(&last_ip);
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip, exp = 42ull;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(ip, exp);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_null(void)
+{
+ uint64_t ip = 13ull;
+ int errcode;
+
+ errcode = pt_last_ip_query(&ip, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+ ptu_uint_eq(ip, 13ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_noip(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip = 13ull;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = 0;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+ ptu_uint_eq(ip, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_suppressed(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip = 13ull;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 1;
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+ ptu_uint_eq(ip, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_suppressed(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_suppressed;
+ packet.ip = 13ull;
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 42ull);
+ ptu_uint_eq(last_ip.have_ip, have_ip);
+ ptu_uint_eq(last_ip.suppressed, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_upd16(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0xff0042ull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_update_16;
+ packet.ip = 0xccc013ull;
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 0xffc013ull);
+ ptu_uint_eq(last_ip.have_ip, 1);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_upd32(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0xff00000420ull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_update_32;
+ packet.ip = 0xcc0000c013ull;
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 0xff0000c013ull);
+ ptu_uint_eq(last_ip.have_ip, 1);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_sext48(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0x7fffffffffffffffull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_sext_48;
+ packet.ip = 0xff00000000ffull;
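+ /* Bit 47 is set, so the address gets sign-extended into the upper bits. */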
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 0xffffff00000000ffull);
+ ptu_uint_eq(last_ip.have_ip, 1);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_bad_packet(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0x7fffffffffffffffull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = (enum pt_ip_compression) 0xff;
+ packet.ip = 0ull;
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, -pte_bad_packet);
+ ptu_uint_eq(last_ip.ip, 0x7fffffffffffffffull);
+ ptu_uint_eq(last_ip.have_ip, have_ip);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_null_ip(void)
+{
+ struct pt_packet_ip packet;
+ int errcode;
+
+ errcode = pt_last_ip_update_ip(NULL, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_null_packet(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.ip = 0x7fffffffffffffffull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_update_ip(&last_ip, NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+ ptu_uint_eq(last_ip.ip, 0x7fffffffffffffffull);
+ ptu_uint_eq(last_ip.have_ip, have_ip);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run(suite, init_null);
+ ptu_run(suite, status_initial);
+ ptu_run(suite, status);
+ ptu_run(suite, status_null);
+ ptu_run(suite, status_noip);
+ ptu_run(suite, status_suppressed);
+ ptu_run(suite, query_initial);
+ ptu_run(suite, query);
+ ptu_run(suite, query_null);
+ ptu_run(suite, query_noip);
+ ptu_run(suite, query_suppressed);
+ ptu_run_p(suite, update_ip_suppressed, 0);
+ ptu_run_p(suite, update_ip_suppressed, 1);
+ ptu_run_p(suite, update_ip_upd16, 0);
+ ptu_run_p(suite, update_ip_upd16, 1);
+ ptu_run_p(suite, update_ip_upd32, 0);
+ ptu_run_p(suite, update_ip_upd32, 1);
+ ptu_run_p(suite, update_ip_sext48, 0);
+ ptu_run_p(suite, update_ip_sext48, 1);
+ ptu_run_p(suite, update_ip_bad_packet, 0);
+ ptu_run_p(suite, update_ip_bad_packet, 1);
+ ptu_run(suite, update_ip_null_ip);
+ ptu_run_p(suite, update_ip_null_packet, 0);
+ ptu_run_p(suite, update_ip_null_packet, 1);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-mapped_section.c b/libipt/test/src/ptunit-mapped_section.c
new file mode 100644
index 000000000000..28f0a5bd6779
--- /dev/null
+++ b/libipt/test/src/ptunit-mapped_section.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+
+static struct ptunit_result begin(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t begin;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ begin = pt_msec_begin(&msec);
+ ptu_uint_eq(begin, 0x2000);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result end(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t end;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ end = pt_msec_end(&msec);
+ ptu_uint_eq(end, 0x3000);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result offset(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t offset;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ offset = pt_msec_offset(&msec);
+ ptu_uint_eq(offset, 0x100ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result size(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t size;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ size = pt_msec_size(&msec);
+ ptu_uint_eq(size, 0x1000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result asid(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_asid asid;
+ const struct pt_asid *pasid;
+
+ pt_asid_init(&asid);
+ asid.cr3 = 0xa00000ull;
+ asid.vmcs = 0xb00000ull;
+
+ pt_msec_init(&msec, NULL, &asid, 0x2000ull, 0x100ull, 0x1000ull);
+
+ pasid = pt_msec_asid(&msec);
+ ptu_ptr(pasid);
+ ptu_uint_eq(pasid->cr3, asid.cr3);
+ ptu_uint_eq(pasid->vmcs, asid.vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result asid_null(void)
+{
+ struct pt_mapped_section msec;
+ const struct pt_asid *pasid;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ pasid = pt_msec_asid(&msec);
+ ptu_ptr(pasid);
+ ptu_uint_eq(pasid->cr3, pt_asid_no_cr3);
+ ptu_uint_eq(pasid->vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map(void)
+{
+ struct pt_mapped_section msec;
+ uint64_t mapped;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ mapped = pt_msec_map(&msec, 0x900);
+ ptu_uint_eq(mapped, 0x2800);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unmap(void)
+{
+ struct pt_mapped_section msec;
+ uint64_t offset;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ offset = pt_msec_unmap(&msec, 0x3000);
+ ptu_uint_eq(offset, 0x1100);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result section(void)
+{
+ static struct pt_section section;
+ struct pt_mapped_section msec;
+ struct pt_section *psection;
+
+ pt_msec_init(&msec, &section, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ psection = pt_msec_section(&msec);
+ ptu_ptr_eq(psection, &section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result section_null(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section *psection;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ psection = pt_msec_section(&msec);
+ ptu_ptr_eq(psection, NULL);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, begin);
+ ptu_run(suite, end);
+ ptu_run(suite, offset);
+ ptu_run(suite, size);
+ ptu_run(suite, asid);
+ ptu_run(suite, asid_null);
+ ptu_run(suite, map);
+ ptu_run(suite, unmap);
+ ptu_run(suite, section);
+ ptu_run(suite, section_null);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-msec_cache.c b/libipt/test/src/ptunit-msec_cache.c
new file mode 100644
index 000000000000..d3926c9a0967
--- /dev/null
+++ b/libipt/test/src/ptunit-msec_cache.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_msec_cache.h"
+
+#include "intel-pt.h"
+
+
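+/* Mock implementations of the pt_section use- and map-counting functions.
+ *
+ * They only maintain the counts on the test section so the fixture can
+ * check them without creating real sections.
+ */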
+int pt_section_get(struct pt_section *section)
+{
+ uint16_t ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount + 1;
+ if (!ucount)
+ return -pte_overflow;
+
+ section->ucount = ucount;
+ return 0;
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ uint16_t ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount)
+ return -pte_overflow;
+
+ section->ucount = ucount - 1;
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ uint16_t ucount, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount)
+ return -pte_overflow;
+
+ section->mcount = mcount;
+ return 0;
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ uint16_t ucount, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount)
+ return -pte_internal;
+
+ mcount = section->mcount;
+ if (!mcount)
+ return -pte_overflow;
+
+ section->mcount = mcount - 1;
+ return 0;
+}
+
+/* A mock image. */
+struct pt_image {
+ /* The section stored in the image.
+ *
+ * This is either the fixture's section or NULL.
+ */
+ struct pt_section *section;
+};
+
+extern int pt_image_validate(struct pt_image *, struct pt_mapped_section *,
+ uint64_t, int);
+extern int pt_image_find(struct pt_image *, struct pt_mapped_section *,
+ const struct pt_asid *, uint64_t);
+
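+/* A mock of pt_image_validate().
+ *
+ * It declares the cached section valid only if it is the section stored in
+ * the mock image.
+ */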
+int pt_image_validate(struct pt_image *image, struct pt_mapped_section *msec,
+ uint64_t vaddr, int isid)
+{
+ struct pt_section *section;
+
+ (void) vaddr;
+ (void) isid;
+
+ if (!image || !msec)
+ return -pte_internal;
+
+ section = image->section;
+ if (!section)
+ return -pte_nomap;
+
+ if (section != msec->section)
+ return -pte_nomap;
+
+ return 0;
+}
+
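+/* A mock of pt_image_find().
+ *
+ * It hands out the mock image's section and takes a use-count reference on
+ * behalf of the caller.
+ */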
+int pt_image_find(struct pt_image *image, struct pt_mapped_section *msec,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_section *section;
+
+ (void) vaddr;
+
+ if (!image || !msec || !asid)
+ return -pte_internal;
+
+ section = image->section;
+ if (!section)
+ return -pte_nomap;
+
+ if (msec->section)
+ return -pte_internal;
+
+ msec->section = section;
+
+ return pt_section_get(section);
+}
+
+/* A test fixture providing a section, a cache, and an image, and tracking
+ * the section's use and map counts.
+ */
+struct test_fixture {
+ /* A test section. */
+ struct pt_section section;
+
+ /* A test cache. */
+ struct pt_msec_cache mcache;
+
+ /* A test image. */
+ struct pt_image image;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct test_fixture *);
+ struct ptunit_result (*fini)(struct test_fixture *);
+};
+
+static struct ptunit_result init_null(void)
+{
+ int status;
+
+ status = pt_msec_cache_init(NULL);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_null(void)
+{
+ pt_msec_cache_fini(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result invalidate_null(void)
+{
+ int status;
+
+ status = pt_msec_cache_invalidate(NULL);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(void)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_msec_cache mcache;
+ struct pt_image image;
+ int status;
+
+ status = pt_msec_cache_read(NULL, &msec, &image, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_read(&mcache, NULL, &image, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_read(&mcache, &msec, NULL, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fill_null(void)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_msec_cache mcache;
+ struct pt_image image;
+ struct pt_asid asid;
+ int status;
+
+ memset(&mcache, 0, sizeof(mcache));
+
+ status = pt_msec_cache_fill(NULL, &msec, &image, &asid, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_fill(&mcache, NULL, &image, &asid, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_fill(&mcache, &msec, NULL, &asid, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_fill(&mcache, &msec, &image, NULL, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result invalidate(struct test_fixture *tfix)
+{
+ struct pt_section *section;
+ int status;
+
+ status = pt_msec_cache_invalidate(&tfix->mcache);
+ ptu_int_eq(status, 0);
+
+ section = pt_msec_section(&tfix->mcache.msec);
+ ptu_null(section);
+
+ ptu_uint_eq(tfix->section.mcount, 0);
+ ptu_uint_eq(tfix->section.ucount, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomap(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ int status;
+
+ msec = NULL;
+
+ status = pt_msec_cache_read(&tfix->mcache, &msec, &tfix->image, 0ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_null(msec);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_section *section;
+ int status;
+
+ status = pt_msec_cache_read(&tfix->mcache, &msec, &tfix->image, 0ull);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr_eq(msec, &tfix->mcache.msec);
+
+ section = pt_msec_section(msec);
+ ptu_ptr_eq(section, &tfix->section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fill_nomap(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_asid asid;
+ struct pt_section *section;
+ int status;
+
+ msec = NULL;
+
+ status = pt_msec_cache_fill(&tfix->mcache, &msec, &tfix->image, &asid,
+ 0ull);
+ ptu_int_eq(status, -pte_nomap);
+
+ section = pt_msec_section(&tfix->mcache.msec);
+ ptu_null(section);
+ ptu_null(msec);
+
+ ptu_uint_eq(tfix->section.mcount, 0);
+ ptu_uint_eq(tfix->section.ucount, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fill(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_section *section;
+ struct pt_asid asid;
+ int status;
+
+ status = pt_msec_cache_fill(&tfix->mcache, &msec, &tfix->image, &asid,
+ 0ull);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr_eq(msec, &tfix->mcache.msec);
+
+ section = pt_msec_section(msec);
+ ptu_ptr_eq(section, &tfix->section);
+
+ ptu_uint_eq(section->mcount, 1);
+ ptu_uint_eq(section->ucount, 1);
+
+ return ptu_passed();
+}
+
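+/* Initialize a fixture with an empty cache and an empty image. */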
+static struct ptunit_result sfix_init(struct test_fixture *tfix)
+{
+ memset(&tfix->section, 0, sizeof(tfix->section));
+ memset(&tfix->mcache, 0, sizeof(tfix->mcache));
+ memset(&tfix->image, 0, sizeof(tfix->image));
+
+ return ptu_passed();
+}
+
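+/* Initialize a fixture whose image contains the section; the cache remains
+ * empty.
+ */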
+static struct ptunit_result ifix_init(struct test_fixture *tfix)
+{
+ ptu_test(sfix_init, tfix);
+
+ tfix->image.section = &tfix->section;
+
+ return ptu_passed();
+}
+
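+/* Initialize a fixture whose cache already holds the mapped and referenced
+ * section.
+ */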
+static struct ptunit_result cfix_init(struct test_fixture *tfix)
+{
+ ptu_test(sfix_init, tfix);
+
+ tfix->mcache.msec.section = &tfix->section;
+
+ tfix->section.ucount = 1;
+ tfix->section.mcount = 1;
+
+ return ptu_passed();
+}
+
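+/* Initialize a fixture where both the cache and the image hold the section. */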
+static struct ptunit_result cifix_init(struct test_fixture *tfix)
+{
+ ptu_test(cfix_init, tfix);
+
+ tfix->image.section = &tfix->section;
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+ struct test_fixture sfix, ifix, cfix, cifix;
+
+ sfix.init = sfix_init;
+ sfix.fini = NULL;
+
+ ifix.init = ifix_init;
+ ifix.fini = NULL;
+
+ cfix.init = cfix_init;
+ cfix.fini = NULL;
+
+ cifix.init = cifix_init;
+ cifix.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init_null);
+ ptu_run(suite, fini_null);
+ ptu_run(suite, invalidate_null);
+ ptu_run(suite, read_null);
+ ptu_run(suite, fill_null);
+
+ ptu_run_f(suite, invalidate, sfix);
+ ptu_run_f(suite, invalidate, cfix);
+
+ ptu_run_f(suite, read_nomap, sfix);
+ ptu_run_f(suite, read_nomap, ifix);
+ ptu_run_f(suite, read_nomap, cfix);
+ ptu_run_f(suite, read, cifix);
+
+ ptu_run_f(suite, fill_nomap, sfix);
+ ptu_run_f(suite, fill_nomap, cfix);
+ ptu_run_f(suite, fill, ifix);
+ ptu_run_f(suite, fill, cifix);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-packet.c b/libipt/test/src/ptunit-packet.c
new file mode 100644
index 000000000000..c064081f9968
--- /dev/null
+++ b/libipt/test/src/ptunit-packet.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_packet_decoder.h"
+#include "pt_query_decoder.h"
+#include "pt_encoder.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+/* A test fixture providing everything needed for packet en- and de-coding. */
+struct packet_fixture {
+ /* The trace buffer. */
+ uint8_t buffer[64];
+
+ /* Two packets: [0] for encoding, [1] for decoding. */
+ struct pt_packet packet[2];
+
+ /* The configuration. */
+ struct pt_config config;
+
+ /* The encoder. */
+ struct pt_encoder encoder;
+
+ /* The decoder. */
+ struct pt_packet_decoder decoder;
+
+ /* The return value for an unknown decode. */
+ int unknown;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct packet_fixture *);
+ struct ptunit_result (*fini)(struct packet_fixture *);
+};
+
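+/* A decode callback for unknown packets.
+ *
+ * It checks that the decoder passes the expected configuration and position,
+ * stores the fixture in the packet's private data, and returns the fixture's
+ * canned result.
+ */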
+static int pfix_decode_unknown(struct pt_packet_unknown *packet,
+ const struct pt_config *config,
+ const uint8_t *pos, void *context)
+{
+ struct packet_fixture *pfix;
+
+ if (!packet || !config)
+ return -pte_internal;
+
+ pfix = (struct packet_fixture *) context;
+ if (!pfix)
+ return -pte_internal;
+
+ if (config->begin != pfix->buffer)
+ return -pte_internal;
+
+ if (config->end != pfix->buffer + sizeof(pfix->buffer))
+ return -pte_internal;
+
+ if (pos != pfix->buffer)
+ return -pte_internal;
+
+ packet->priv = pfix;
+
+ return pfix->unknown;
+}
+
+static struct ptunit_result pfix_init(struct packet_fixture *pfix)
+{
+ int errcode;
+
+ memset(pfix->buffer, 0, sizeof(pfix->buffer));
+ memset(pfix->packet, 0, sizeof(pfix->packet));
+ memset(&pfix->config, 0, sizeof(pfix->config));
+ pfix->config.size = sizeof(pfix->config);
+ pfix->config.begin = pfix->buffer;
+ pfix->config.end = pfix->buffer + sizeof(pfix->buffer);
+ pfix->config.decode.callback = pfix_decode_unknown;
+ pfix->config.decode.context = pfix;
+
+ pt_encoder_init(&pfix->encoder, &pfix->config);
+ pt_pkt_decoder_init(&pfix->decoder, &pfix->config);
+
+ errcode = pt_pkt_sync_set(&pfix->decoder, 0x0ull);
+ ptu_int_eq(errcode, 0);
+
+ pfix->unknown = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_fini(struct packet_fixture *pfix)
+{
+ pt_encoder_fini(&pfix->encoder);
+ pt_pkt_decoder_fini(&pfix->decoder);
+
+ return ptu_passed();
+}
+
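+/* Compare two packets byte-wise.
+ *
+ * This relies on both packets being zero-initialized so that padding bytes
+ * compare equal.
+ */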
+static struct ptunit_result ptu_pkt_eq(const struct pt_packet *enc,
+ const struct pt_packet *dec)
+{
+ const uint8_t *renc, *rdec;
+ size_t byte;
+
+ ptu_ptr(enc);
+ ptu_ptr(dec);
+
+ renc = (const uint8_t *) enc;
+ rdec = (const uint8_t *) dec;
+
+ for (byte = 0; byte < sizeof(*enc); ++byte)
+ ptu_uint_eq(renc[byte], rdec[byte]);
+
+ return ptu_passed();
+}
+
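+/* Encode the test packet, decode it again, and compare the two. */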
+static struct ptunit_result pfix_test(struct packet_fixture *pfix)
+{
+ int size;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->packet[0].size = (uint8_t) size;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_gt(size, 0);
+
+ return ptu_pkt_eq(&pfix->packet[0], &pfix->packet[1]);
+}
+
+static struct ptunit_result no_payload(struct packet_fixture *pfix,
+ enum pt_packet_type type)
+{
+ pfix->packet[0].type = type;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unknown(struct packet_fixture *pfix, int exp)
+{
+ int size;
+
+ pfix->buffer[0] = pt_opc_bad;
+ pfix->unknown = exp;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, pfix->unknown);
+
+ if (size >= 0) {
+ ptu_int_eq(pfix->packet[1].type, ppt_unknown);
+ ptu_uint_eq(pfix->packet[1].size, (uint8_t) size);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.packet,
+ pfix->buffer);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unknown_ext(struct packet_fixture *pfix, int exp)
+{
+ int size;
+
+ pfix->buffer[0] = pt_opc_ext;
+ pfix->buffer[1] = pt_ext_bad;
+ pfix->unknown = exp;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, pfix->unknown);
+
+ if (size >= 0) {
+ ptu_int_eq(pfix->packet[1].type, ppt_unknown);
+ ptu_uint_eq(pfix->packet[1].size, (uint8_t) size);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.packet,
+ pfix->buffer);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unknown_ext2(struct packet_fixture *pfix, int exp)
+{
+ int size;
+
+ pfix->buffer[0] = pt_opc_ext;
+ pfix->buffer[1] = pt_ext_ext2;
+ pfix->buffer[2] = pt_ext2_bad;
+ pfix->unknown = exp;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, exp);
+
+ if (exp >= 0) {
+ ptu_int_eq(pfix->packet[1].type, ppt_unknown);
+ ptu_uint_eq(pfix->packet[1].size, (uint8_t) size);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.packet,
+ pfix->buffer);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tnt_8(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tnt_8;
+ pfix->packet[0].payload.tnt.bit_size = 4;
+ pfix->packet[0].payload.tnt.payload = 0x5ull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tnt_64(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tnt_64;
+ pfix->packet[0].payload.tnt.bit_size = 23;
+ pfix->packet[0].payload.tnt.payload = 0xabcdeull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ip(struct packet_fixture *pfix,
+ enum pt_packet_type type,
+ enum pt_ip_compression ipc,
+ uint64_t ip)
+{
+ pfix->packet[0].type = type;
+ pfix->packet[0].payload.ip.ipc = ipc;
+ pfix->packet[0].payload.ip.ip = ip;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mode_exec(struct packet_fixture *pfix,
+ enum pt_exec_mode mode)
+{
+ struct pt_packet_mode_exec packet;
+
+ packet = pt_set_exec_mode(mode);
+
+ pfix->packet[0].type = ppt_mode;
+ pfix->packet[0].payload.mode.leaf = pt_mol_exec;
+ pfix->packet[0].payload.mode.bits.exec.csl = packet.csl;
+ pfix->packet[0].payload.mode.bits.exec.csd = packet.csd;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mode_tsx(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mode;
+ pfix->packet[0].payload.mode.leaf = pt_mol_tsx;
+ pfix->packet[0].payload.mode.bits.tsx.intx = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pip(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_pip;
+ pfix->packet[0].payload.pip.cr3 = 0x4200ull;
+ pfix->packet[0].payload.pip.nr = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tsc(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tsc;
+ pfix->packet[0].payload.tsc.tsc = 0x42ull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_cbr;
+ pfix->packet[0].payload.cbr.ratio = 0x23;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tma;
+ pfix->packet[0].payload.tma.ctc = 0x42;
+ pfix->packet[0].payload.tma.fc = 0x123;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma_bad(struct packet_fixture *pfix)
+{
+ int errcode;
+
+ pfix->packet[0].type = ppt_tma;
+ pfix->packet[0].payload.tma.ctc = 0x42;
+ pfix->packet[0].payload.tma.fc = 0x200;
+
+ errcode = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_eq(errcode, -pte_bad_packet);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mtc(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mtc;
+ pfix->packet[0].payload.mtc.ctc = 0x23;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cyc(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_cyc;
+ pfix->packet[0].payload.cyc.value = 0x23;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmcs(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_vmcs;
+ pfix->packet[0].payload.vmcs.base = 0xabcdef000ull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mnt(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mnt;
+ pfix->packet[0].payload.mnt.payload = 0x1234567890abcdefull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result exstop(struct packet_fixture *pfix, int ip)
+{
+ pfix->packet[0].type = ppt_exstop;
+ pfix->packet[0].payload.exstop.ip = ip ? 1 : 0;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mwait(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mwait;
+ pfix->packet[0].payload.mwait.hints = 0xc;
+ pfix->packet[0].payload.mwait.ext = 0x1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pwre(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_pwre;
+ pfix->packet[0].payload.pwre.state = 0x0;
+ pfix->packet[0].payload.pwre.sub_state = 0x3;
+ pfix->packet[0].payload.pwre.hw = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pwrx(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_pwrx;
+ pfix->packet[0].payload.pwrx.last = 0x3;
+ pfix->packet[0].payload.pwrx.deepest = 0xa;
+ pfix->packet[0].payload.pwrx.store = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptw(struct packet_fixture *pfix, uint8_t plc,
+ int ip)
+{
+ uint64_t pl, mask;
+ int size;
+
+ size = pt_ptw_size(plc);
+ ptu_int_gt(size, 0);
+
+ pl = 0x1234567890abcdefull;
+
+ ptu_uint_le((size_t) size, sizeof(mask));
+ mask = ~0ull >> ((sizeof(mask) - (size_t) size) * 8);
+
+ pfix->packet[0].type = ppt_ptw;
+ pfix->packet[0].payload.ptw.payload = pl & mask;
+ pfix->packet[0].payload.ptw.plc = plc;
+ pfix->packet[0].payload.ptw.ip = ip ? 1 : 0;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff(struct packet_fixture *pfix,
+ enum pt_packet_type type)
+{
+ int size;
+
+ pfix->packet[0].type = type;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff_ip(struct packet_fixture *pfix,
+ enum pt_packet_type type)
+{
+ int size;
+
+ pfix->packet[0].type = type;
+ pfix->packet[0].payload.ip.ipc = pt_ipc_sext_48;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff_cyc(struct packet_fixture *pfix)
+{
+ int size;
+
+ pfix->packet[0].type = ppt_cyc;
+ pfix->packet[0].payload.cyc.value = 0xa8;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff_mode(struct packet_fixture *pfix,
+ enum pt_mode_leaf leaf)
+{
+ int size;
+
+ pfix->packet[0].type = ppt_mode;
+ pfix->packet[0].payload.mode.leaf = leaf;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct packet_fixture pfix;
+ struct ptunit_suite suite;
+
+ pfix.init = pfix_init;
+ pfix.fini = pfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_fp(suite, no_payload, pfix, ppt_pad);
+ ptu_run_fp(suite, no_payload, pfix, ppt_psb);
+ ptu_run_fp(suite, no_payload, pfix, ppt_ovf);
+ ptu_run_fp(suite, no_payload, pfix, ppt_psbend);
+ ptu_run_fp(suite, no_payload, pfix, ppt_stop);
+
+ ptu_run_fp(suite, unknown, pfix, 4);
+ ptu_run_fp(suite, unknown, pfix, -pte_nomem);
+ ptu_run_fp(suite, unknown_ext, pfix, 4);
+ ptu_run_fp(suite, unknown_ext, pfix, -pte_nomem);
+ ptu_run_fp(suite, unknown_ext2, pfix, 4);
+ ptu_run_fp(suite, unknown_ext2, pfix, -pte_nomem);
+
+ ptu_run_f(suite, tnt_8, pfix);
+ ptu_run_f(suite, tnt_64, pfix);
+
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, mode_exec, pfix, ptem_16bit);
+ ptu_run_fp(suite, mode_exec, pfix, ptem_32bit);
+ ptu_run_fp(suite, mode_exec, pfix, ptem_64bit);
+ ptu_run_f(suite, mode_tsx, pfix);
+
+ ptu_run_f(suite, pip, pfix);
+ ptu_run_f(suite, tsc, pfix);
+ ptu_run_f(suite, cbr, pfix);
+ ptu_run_f(suite, tma, pfix);
+ ptu_run_f(suite, tma_bad, pfix);
+ ptu_run_f(suite, mtc, pfix);
+ ptu_run_f(suite, cyc, pfix);
+ ptu_run_f(suite, vmcs, pfix);
+ ptu_run_f(suite, mnt, pfix);
+ ptu_run_fp(suite, exstop, pfix, 0);
+ ptu_run_fp(suite, exstop, pfix, 1);
+ ptu_run_f(suite, mwait, pfix);
+ ptu_run_f(suite, pwre, pfix);
+ ptu_run_f(suite, pwrx, pfix);
+ ptu_run_fp(suite, ptw, pfix, 0, 1);
+ ptu_run_fp(suite, ptw, pfix, 1, 0);
+
+ ptu_run_fp(suite, cutoff, pfix, ppt_psb);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip_pge);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip_pgd);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_fup);
+ ptu_run_fp(suite, cutoff, pfix, ppt_ovf);
+ ptu_run_fp(suite, cutoff, pfix, ppt_psbend);
+ ptu_run_fp(suite, cutoff, pfix, ppt_tnt_64);
+ ptu_run_fp(suite, cutoff, pfix, ppt_tsc);
+ ptu_run_fp(suite, cutoff, pfix, ppt_cbr);
+ ptu_run_fp(suite, cutoff, pfix, ppt_tma);
+ ptu_run_fp(suite, cutoff, pfix, ppt_mtc);
+ ptu_run_f(suite, cutoff_cyc, pfix);
+ ptu_run_fp(suite, cutoff_mode, pfix, pt_mol_exec);
+ ptu_run_fp(suite, cutoff_mode, pfix, pt_mol_tsx);
+ ptu_run_fp(suite, cutoff, pfix, ppt_vmcs);
+ ptu_run_fp(suite, cutoff, pfix, ppt_mnt);
+ ptu_run_fp(suite, cutoff, pfix, ppt_exstop);
+ ptu_run_fp(suite, cutoff, pfix, ppt_mwait);
+ ptu_run_fp(suite, cutoff, pfix, ppt_pwre);
+ ptu_run_fp(suite, cutoff, pfix, ppt_pwrx);
+ ptu_run_fp(suite, cutoff, pfix, ppt_ptw);
+
+ return ptunit_report(&suite);
+}
+
+
+/* Dummy decode functions to satisfy link dependencies.
+ *
+ * As a nice side-effect, we will know if we need to add more tests when
+ * adding new decoder functions.
+ */
+struct pt_query_decoder;
+
+int pt_qry_decode_unknown(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pad(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psb(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_8(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_64(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pge(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pgd(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ovf(struct pt_query_decoder *d)
+{
+ (void) d;
+
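+/* Check that pt_qry_event() honors the caller-supplied size and does not
+ * write beyond it.
+ */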
+ return -pte_internal;
+}
+int pt_qry_decode_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psbend(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tma(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mtc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cyc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_stop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_exstop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mwait(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwre(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwrx(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ptw(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
diff --git a/libipt/test/src/ptunit-query.c b/libipt/test/src/ptunit-query.c
new file mode 100644
index 000000000000..c47ad1223ed2
--- /dev/null
+++ b/libipt/test/src/ptunit-query.c
@@ -0,0 +1,2873 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_last_ip.h"
+#include "pt_decoder_function.h"
+#include "pt_query_decoder.h"
+#include "pt_encoder.h"
+#include "pt_opcodes.h"
+
+
+/* A query testing fixture. */
+
+struct ptu_decoder_fixture {
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct ptu_decoder_fixture *);
+ struct ptunit_result (*fini)(struct ptu_decoder_fixture *);
+
+ /* Encode an optional header for the test to read over. */
+ struct ptunit_result (*header)(struct ptu_decoder_fixture *);
+
+ /* The trace buffer. */
+ uint8_t buffer[1024];
+
+ /* The configuration under test. */
+ struct pt_config config;
+
+ /* An encoder and a query decoder for the above configuration. */
+ struct pt_encoder encoder;
+ struct pt_query_decoder decoder;
+
+ /* For tracking last-ip in tests. */
+ struct pt_last_ip last_ip;
+};
+
+/* An invalid address. */
+static const uint64_t pt_dfix_bad_ip = (1ull << 62) - 1;
+
+/* A sign-extended address. */
+static const uint64_t pt_dfix_sext_ip = 0xffffff00ff00ff00ull;
+
+/* The highest possible address. */
+static const uint64_t pt_dfix_max_ip = (1ull << 47) - 1;
+
+/* The highest possible cr3 value. */
+static const uint64_t pt_dfix_max_cr3 = ((1ull << 47) - 1) & ~0x1f;
+
+/* Synchronize the decoder at the beginning of the trace stream, avoiding the
+ * initial PSB header.
+ */
+static struct ptunit_result ptu_sync_decoder(struct pt_query_decoder *decoder)
+{
+ ptu_ptr(decoder);
+ decoder->enabled = 1;
+
+ (void) pt_df_fetch(&decoder->next, decoder->pos, &decoder->config);
+ return ptu_passed();
+}
+
+/* Cut off the last encoded packet. */
+static struct ptunit_result cutoff(struct pt_query_decoder *decoder,
+ const struct pt_encoder *encoder)
+{
+ uint8_t *pos;
+
+ ptu_ptr(decoder);
+ ptu_ptr(encoder);
+
+ pos = encoder->pos;
+ ptu_ptr(pos);
+
+ pos -= 1;
+ ptu_ptr_le(decoder->config.begin, pos);
+
+ decoder->config.end = pos;
+ return ptu_passed();
+}
+
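+/* Queries on a decoder that has not been synchronized shall fail with
+ * -pte_nosync and leave the caller's variables unchanged.
+ */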
+static struct ptunit_result indir_not_synced(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_nosync);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_not_synced(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_nosync);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_not_synced(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_event event;
+ int errcode;
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_nosync);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_backward(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[2]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_backward_empty_end(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ *
+ * There's an empty PSB+ at the end. We skip it.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_backward_empty_mid(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ *
+ * There's an empty PSB+ in the middle. We skip it.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[2]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_backward_empty_begin(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ *
+ * There's an empty PSB+ at the beginning. We skip it.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[2]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+decode_sync_backward(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t sync[2], offset, ip;
+ int errcode;
+
+ /* Check that we can use sync_backward to re-sync at the current trace
+ * segment as well as to find the previous trace segment.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+
+ errcode = pt_qry_sync_forward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_ge(errcode, 0);
+ ptu_int_eq(event.type, ptev_exec_mode);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_ge(errcode, 0);
+ ptu_int_eq(event.type, ptev_exec_mode);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ errcode = pt_qry_indirect_branch(NULL, &addr);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_uint_eq(addr, ip);
+
+ errcode = pt_qry_indirect_branch(decoder, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_ptr_eq(decoder->pos, config->begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_empty(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ decoder->pos = config->end;
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ uint64_t addr = pt_dfix_bad_ip;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tip(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ if (ipc == pt_ipc_suppressed) {
+ ptu_int_eq(errcode, pts_ip_suppressed | pts_eos);
+ ptu_uint_eq(addr, pt_dfix_bad_ip);
+ } else {
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_tnt(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ uint64_t addr = pt_dfix_bad_ip;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tnt_8(encoder, 0ull, 1);
+ pt_encode_tip(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ if (ipc == pt_ipc_suppressed) {
+ ptu_int_eq(errcode, pts_ip_suppressed);
+ ptu_uint_eq(addr, pt_dfix_bad_ip);
+ } else {
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
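+/* An indirect branch query shall not skip over TNT packets. */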
+static struct ptunit_result
+indir_skip_tnt_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_tip_pge_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pos = encoder->pos;
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_fup_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_fup_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ errcode = pt_qry_cond_branch(NULL, &taken);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_int_eq(taken, tnt);
+
+ errcode = pt_qry_cond_branch(decoder, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_ptr_eq(decoder->pos, config->begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_empty(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ decoder->pos = config->end;
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
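+/* Query a three-bit TNT payload of 0x02.
+ *
+ * The bits shall be reported most-significant bit first: not-taken, taken,
+ * not-taken, with the last query also indicating the end of the trace.
+ */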
+static struct ptunit_result cond(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ pt_encode_tnt_8(encoder, 0x02, 3);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, 0);
+ ptu_int_eq(taken, 0);
+
+ taken = tnt;
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, 0);
+ ptu_int_eq(taken, 1);
+
+ taken = tnt;
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(taken, 0);
+
+ taken = tnt;
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_skip_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_tip_pge_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pos = encoder->pos;
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_fup_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_fup_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
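+/* Event queries with a NULL decoder or a NULL event buffer must fail with
+ * -pte_invalid without consuming any trace.
+ */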
+static struct ptunit_result event_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ struct pt_event event;
+ int errcode;
+
+ errcode = pt_qry_event(NULL, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_event(decoder, NULL, sizeof(event));
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_ptr_eq(decoder->pos, config->begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_bad_size(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_event event;
+ int errcode;
+
+ errcode = pt_qry_event(decoder, &event, 4);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
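+/* Events can be read with a size smaller than struct pt_event; the decoder
+ * must not write beyond the given size.
+ */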
+static struct ptunit_result event_small_size(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ union {
+ struct pt_event event;
+ uint8_t buffer[41];
+ } variant;
+ int errcode;
+
+ memset(variant.buffer, 0xcd, sizeof(variant.buffer));
+
+ pt_encode_tip_pge(encoder, 0ull, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &variant.event, 40);
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(variant.event.type, ptev_enabled);
+ ptu_uint_eq(variant.buffer[40], 0xcd);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_big_size(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ union {
+ struct pt_event event;
+ uint8_t buffer[1024];
+ } variant;
+ int errcode;
+
+ memset(variant.buffer, 0xcd, sizeof(variant.buffer));
+
+ pt_encode_tip_pge(encoder, 0ull, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &variant.event, sizeof(variant.buffer));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(variant.event.type, ptev_enabled);
+ ptu_uint_eq(variant.buffer[sizeof(variant.event)], 0xcd);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_empty(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ struct pt_event event;
+ int errcode;
+
+ decoder->pos = config->end;
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
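+/* A TIP.PGE packet yields a ptev_enabled event; a suppressed IP is not
+ * allowed and fails with -pte_bad_packet.
+ */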
+static struct ptunit_result event_enabled(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tip_pge(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ if (ipc == pt_ipc_suppressed)
+ ptu_int_eq(errcode, -pte_bad_packet);
+ else {
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_enabled);
+ ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_enabled_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
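+/* A TIP.PGD packet yields a ptev_disabled event; a suppressed IP sets the
+ * event's ip_suppressed flag instead of providing an address.
+ */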
+static struct ptunit_result event_disabled(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tip_pgd(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.disabled.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_disabled);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_disabled_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_update_32);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
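+/* A FUP followed by a TIP.PGD yields a ptev_async_disabled event with the FUP
+ * address in 'at' and the TIP.PGD address in 'ip'.
+ */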
+static struct ptunit_result
+event_async_disabled(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, tip;
+ struct pt_event event;
+ int errcode;
+
+ fup.ipc = pt_ipc_sext_48;
+ fup.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config);
+
+ tip.ipc = ipc;
+ tip.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &tip, &dfix->config);
+
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_tip_pgd(encoder, tip.ip, tip.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.async_disabled.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_async_disabled);
+ ptu_uint_eq(event.variant.async_disabled.at, fup.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled_suppressed_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_suppressed);
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled_cutoff_fail_a(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t at = pt_dfix_sext_ip;
+ int errcode;
+
+ pt_encode_fup(encoder, at, pt_ipc_sext_48);
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled_cutoff_fail_b(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_branch_suppressed_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_suppressed);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+
+ return ptu_passed();
+}
+
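+/* A FUP followed by a TIP yields a ptev_async_branch event with 'from' taken
+ * from the FUP and 'to' from the TIP.
+ */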
+static struct ptunit_result event_async_branch(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, tip;
+ struct pt_event event;
+ int errcode;
+
+ fup.ipc = pt_ipc_sext_48;
+ fup.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config);
+
+ tip.ipc = ipc;
+ tip.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &tip, &dfix->config);
+
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_tip(encoder, tip.ip, tip.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.async_branch.to, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_async_branch);
+ ptu_uint_eq(event.variant.async_branch.from, fup.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_branch_cutoff_fail_a(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_branch_cutoff_fail_b(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
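+/* A PIP packet yields a ptev_paging event carrying the CR3 value and the
+ * non-root flag.
+ */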
+static struct ptunit_result event_paging(struct ptu_decoder_fixture *dfix,
+ uint8_t flags, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t cr3 = pt_dfix_max_cr3;
+ int errcode;
+
+ pt_encode_pip(encoder, cr3, flags);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_paging);
+ ptu_uint_eq(event.variant.paging.cr3, cr3);
+ ptu_uint_eq(event.variant.paging.non_root, (flags & pt_pl_pip_nr) != 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_paging_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_pip(encoder, 0, 0);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
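+/* A PIP between FUP and TIP yields a ptev_async_branch event followed by a
+ * ptev_async_paging event bound to the branch destination.
+ */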
+static struct ptunit_result
+event_async_paging(struct ptu_decoder_fixture *dfix, uint8_t flags,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t to = pt_dfix_sext_ip, from = to & ~0xffffull;
+ uint64_t cr3 = pt_dfix_max_cr3;
+ int errcode;
+
+ pt_encode_fup(encoder, from, pt_ipc_sext_48);
+ pt_encode_pip(encoder, cr3, flags);
+ pt_encode_tip(encoder, to, pt_ipc_update_16);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_int_eq(event.type, ptev_async_branch);
+ ptu_uint_eq(event.variant.async_branch.from, from);
+ ptu_uint_eq(event.variant.async_branch.to, to);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_async_paging);
+ ptu_uint_eq(event.variant.async_paging.cr3, cr3);
+ ptu_uint_eq(event.variant.async_paging.non_root,
+ (flags & pt_pl_pip_nr) != 0);
+ ptu_uint_eq(event.variant.async_paging.ip, to);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_paging_suppressed(struct ptu_decoder_fixture *dfix, uint8_t flags,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t from = pt_dfix_sext_ip, cr3 = pt_dfix_max_cr3;
+ int errcode;
+
+ pt_encode_fup(encoder, from, pt_ipc_sext_48);
+ pt_encode_pip(encoder, cr3, flags);
+ pt_encode_tip(encoder, 0, pt_ipc_suppressed);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_ne(event.ip_suppressed, 0);
+ ptu_int_eq(event.type, ptev_async_branch);
+ ptu_uint_eq(event.variant.async_branch.from, from);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_ne(event.ip_suppressed, 0);
+ ptu_int_eq(event.type, ptev_async_paging);
+ ptu_uint_eq(event.variant.async_paging.cr3, cr3);
+ ptu_uint_eq(event.variant.async_paging.non_root,
+ (flags & pt_pl_pip_nr) != 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_paging_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pt_encode_pip(encoder, 0, 0);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
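+/* An OVF followed by a FUP yields a ptev_overflow event at the FUP address; a
+ * suppressed FUP address makes the query fail with -pte_noip.
+ */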
+static struct ptunit_result event_overflow_fup(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_ovf(encoder);
+ pt_encode_fup(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, -pte_noip);
+ break;
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_overflow);
+ ptu_uint_eq(event.variant.overflow.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ break;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_overflow_tip_pge(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_ovf(encoder);
+ pt_encode_tip_pge(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_int_eq(event.type, ptev_overflow);
+ ptu_uint_ne(event.ip_suppressed, 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, -pte_bad_packet);
+ break;
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_enabled);
+ ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ break;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_overflow_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_ovf(encoder);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
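+/* A STOP packet yields a ptev_stop event. */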
+static struct ptunit_result event_stop(struct ptu_decoder_fixture *dfix,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_stop(encoder);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_stop);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
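+/* A MODE.EXEC followed by a TIP yields a ptev_exec_mode event; the TIP address
+ * also answers a subsequent indirect branch query.
+ */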
+static struct ptunit_result
+event_exec_mode_tip(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ enum pt_exec_mode mode = ptem_16bit;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ uint64_t addr = 0ull;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_mode_exec(encoder, mode);
+ pt_encode_tip(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, 0);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.exec_mode.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_exec_mode);
+ ptu_int_eq(event.variant.exec_mode.mode, mode);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ if (ipc == pt_ipc_suppressed)
+ ptu_int_eq(errcode, pts_ip_suppressed | pts_eos);
+ else {
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_exec(encoder, ptem_32bit);
+ pt_encode_tip(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip_pge(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ enum pt_exec_mode mode = ptem_16bit;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ uint64_t addr = 0ull;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_mode_exec(encoder, mode);
+ pt_encode_tip_pge(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+ decoder->enabled = 0;
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ if (ipc == pt_ipc_suppressed) {
+ ptu_int_eq(errcode, -pte_bad_packet);
+ ptu_uint_eq(addr, 0ull);
+ } else {
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_int_eq(event.type, ptev_enabled);
+ ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_exec_mode);
+ ptu_int_eq(event.variant.exec_mode.mode, mode);
+ ptu_uint_eq(event.variant.exec_mode.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip_pge_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_exec(encoder, ptem_16bit);
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_exec(encoder, ptem_64bit);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
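+/* A MODE.TSX followed by a FUP yields a ptev_tsx event at the FUP address with
+ * the speculative and aborted flags taken from the MODE.TSX packet.
+ */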
+static struct ptunit_result event_tsx_fup(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint8_t flags, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, tip;
+ struct pt_event event;
+ uint64_t addr = 0;
+ int errcode;
+
+ fup.ipc = ipc;
+ fup.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config);
+
+ tip.ipc = pt_ipc_sext_48;
+ tip.ip = pt_dfix_sext_ip;
+
+ pt_encode_mode_tsx(encoder, flags);
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_tip(encoder, tip.ip, tip.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, 0);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.tsx.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_tsx);
+ ptu_int_eq(event.variant.tsx.speculative,
+ (flags & pt_mob_tsx_intx) != 0);
+ ptu_int_eq(event.variant.tsx.aborted,
+ (flags & pt_mob_tsx_abrt) != 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_eq(addr, tip.ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_tsx_fup_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_tsx(encoder, 0);
+ pt_encode_fup(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_tsx_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_tsx(encoder, 0);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_skip_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ const uint8_t *pos;
+ int errcode;
+
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ /* We omit the actual event - we don't get that far, anyway. */
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_skip_tnt_8_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_tnt_8(encoder, 0, 1);
+ /* We omit the actual event - we don't get that far, anyway. */
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_bad_query);
+ /* The fail position depends on the fixture's header. */
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_skip_tnt_64_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tnt_64(encoder, 0, 1);
+ pt_encode_tnt_64(encoder, 0, 1);
+ /* We omit the actual event - we don't get that far, anyway. */
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_bad_query);
+ /* The fail position depends on the fixture's header. */
+
+ return ptu_passed();
+}
+
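+/* Synchronizing onto a PSB+ containing MODE.TSX and FUP reports a pending,
+ * status-update ptev_tsx event.
+ */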
+static struct ptunit_result sync_event(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ uint64_t addr = 0ull;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_tsx(encoder, pt_mob_tsx_intx);
+ pt_encode_fup(encoder, packet.ip, packet.ipc);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, (pts_event_pending | pts_ip_suppressed));
+ break;
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ break;
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_ne(event.status_update, 0);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.tsx.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_tsx);
+ ptu_int_eq(event.variant.tsx.speculative, 1);
+ ptu_int_eq(event.variant.tsx.aborted, 0);
+ ptu_int_eq(event.has_tsc, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_event_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t addr;
+ int errcode;
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ ptu_check(cutoff, decoder, encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_event_incomplete_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t addr;
+ int errcode;
+
+ pt_encode_psb(encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
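+/* An overflow terminating a PSB+ yields the buffered status event first,
+ * followed by a ptev_overflow event at the subsequent FUP address.
+ */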
+static struct ptunit_result sync_ovf_event(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, ovf;
+ struct pt_event event;
+ uint64_t addr = 0;
+ int errcode;
+
+ fup.ipc = pt_ipc_sext_48;
+ fup.ip = pt_dfix_max_ip;
+
+ ovf.ipc = ipc;
+ ovf.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &ovf, &dfix->config);
+
+ pt_encode_psb(encoder);
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_mode_tsx(encoder, 0);
+ pt_encode_tsc(encoder, 0x1000);
+ pt_encode_ovf(encoder);
+ pt_encode_fup(encoder, ovf.ip, ovf.ipc);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_eq(addr, fup.ip);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_ne(event.status_update, 0);
+ ptu_int_eq(event.type, ptev_tsx);
+ ptu_int_eq(event.variant.tsx.speculative, 0);
+ ptu_int_eq(event.variant.tsx.aborted, 0);
+ ptu_uint_eq(event.variant.tsx.ip, fup.ip);
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, 0x1000);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, -pte_noip);
+ return ptu_passed();
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_overflow);
+ ptu_uint_eq(event.variant.overflow.ip, dfix->last_ip.ip);
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, 0x1000);
+ break;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_ovf_event_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t addr;
+ int errcode;
+
+ pt_encode_psb(encoder);
+ pt_encode_ovf(encoder);
+
+ ptu_check(cutoff, decoder, encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_null_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_qry_time(NULL, NULL, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_time(decoder, NULL, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_time(NULL, &tsc, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_initial(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_qry_time(decoder, &tsc, NULL, NULL);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t tsc, exp;
+ int errcode;
+
+ exp = 0x11223344556677ull;
+
+ decoder->last_time.have_tsc = 1;
+ decoder->last_time.tsc = exp;
+
+ errcode = pt_qry_time(decoder, &tsc, NULL, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(tsc, exp);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_qry_core_bus_ratio(NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_core_bus_ratio(decoder, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_core_bus_ratio(NULL, &cbr);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr_initial(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_qry_core_bus_ratio(decoder, &cbr);
+ ptu_int_eq(errcode, -pte_no_cbr);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint32_t cbr;
+ int errcode;
+
+ decoder->last_time.have_cbr = 1;
+ decoder->last_time.cbr = 42;
+
+ errcode = pt_qry_core_bus_ratio(decoder, &cbr);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(cbr, 42);
+
+ return ptu_passed();
+}
+
+/* Test that end-of-stream is indicated correctly when the stream ends with a
+ * partial non-query-relevant packet.
+ */
+static struct ptunit_result indir_cyc_cutoff(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip;
+ int errcode;
+
+ pt_encode_tip(encoder, 0xa000ull, pt_ipc_full);
+ pt_encode_cyc(encoder, 0xfff);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &ip);
+ ptu_int_eq(errcode, pts_eos);
+
+ return ptu_passed();
+}
+
+/* Test that end-of-stream is indicated correctly when the stream ends with a
+ * partial non-query-relevant packet.
+ */
+static struct ptunit_result cond_cyc_cutoff(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, taken;
+
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_cyc(encoder, 0xfff);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, pts_eos);
+
+ return ptu_passed();
+}
+
+/* Test that end-of-stream is indicated correctly when the stream ends with a
+ * partial non-query-relevant packet.
+ */
+static struct ptunit_result event_cyc_cutoff(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tip_pgd(encoder, 0ull, pt_ipc_full);
+ pt_encode_cyc(encoder, 0xffff);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+
+ return ptu_passed();
+}
+
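+/* Initialize the decoder fixture: set up encoder and decoder on a zeroed
+ * trace buffer, seed the decoder's IP, and run the fixture's optional header.
+ */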
+static struct ptunit_result ptu_dfix_init(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_config *config = &dfix->config;
+ int errcode;
+
+ (void) memset(dfix->buffer, 0, sizeof(dfix->buffer));
+
+ pt_config_init(config);
+
+ config->begin = dfix->buffer;
+ config->end = dfix->buffer + sizeof(dfix->buffer);
+
+ errcode = pt_encoder_init(&dfix->encoder, config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_qry_decoder_init(&dfix->decoder, config);
+ ptu_int_eq(errcode, 0);
+
+ dfix->decoder.ip.ip = pt_dfix_bad_ip;
+ dfix->decoder.ip.have_ip = 1;
+ dfix->decoder.ip.suppressed = 0;
+
+ dfix->last_ip = dfix->decoder.ip;
+
+ if (dfix->header)
+ dfix->header(dfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptu_dfix_fini(struct ptu_decoder_fixture *dfix)
+{
+ pt_qry_decoder_fini(&dfix->decoder);
+ pt_encoder_fini(&dfix->encoder);
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of an empty buffer. */
+static struct ptunit_result
+ptu_dfix_header_sync(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for unconditional indirect branch queries.
+ */
+static struct ptunit_result
+ptu_dfix_header_indir(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ pt_encode_pad(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for unconditional indirect branch queries including a PSB.
+ */
+static struct ptunit_result
+ptu_dfix_header_indir_psb(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ /* The psb must be empty since the tests won't skip status events.
+ * On the other hand, we do need to provide an address since tests
+ * may want to update last-ip, which requires a last-ip, of course.
+ */
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_psb(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48);
+ pt_encode_psbend(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for conditional branch queries.
+ */
+static struct ptunit_result
+ptu_dfix_header_cond(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ /* The psb must be empty since the tests won't skip status events.
+ * On the other hand, we do need to provide an address since tests
+ * may want to update last-ip, which requires a last-ip, of course.
+ */
+ pt_encode_pad(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_psb(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_pad(encoder);
+ pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48);
+ pt_encode_psbend(encoder);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_pad(encoder);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for event queries.
+ */
+static struct ptunit_result
+ptu_dfix_header_event(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ pt_encode_pad(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0x1000);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for event queries including a PSB.
+ */
+static struct ptunit_result
+ptu_dfix_header_event_psb(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ /* The psb must be empty since the tests won't skip status events.
+ * On the other hand, we do need to provide an address since tests
+ * may want to update last-ip, which requires a last-ip, of course.
+ */
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_psb(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0x1000);
+ pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48);
+ pt_encode_psbend(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+static struct ptu_decoder_fixture dfix_raw;
+static struct ptu_decoder_fixture dfix_empty;
+static struct ptu_decoder_fixture dfix_indir;
+static struct ptu_decoder_fixture dfix_indir_psb;
+static struct ptu_decoder_fixture dfix_cond;
+static struct ptu_decoder_fixture dfix_event;
+static struct ptu_decoder_fixture dfix_event_psb;
+
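+/* Instantiate the fixture variants; each non-raw variant uses a header that
+ * pre-fills the trace buffer for a particular query flavor.
+ */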
+static void init_fixtures(void)
+{
+ dfix_raw.init = ptu_dfix_init;
+ dfix_raw.fini = ptu_dfix_fini;
+
+ dfix_empty = dfix_raw;
+ dfix_empty.header = ptu_dfix_header_sync;
+
+ dfix_indir = dfix_raw;
+ dfix_indir.header = ptu_dfix_header_indir;
+
+ dfix_indir_psb = dfix_raw;
+ dfix_indir_psb.header = ptu_dfix_header_indir_psb;
+
+ dfix_cond = dfix_raw;
+ dfix_cond.header = ptu_dfix_header_cond;
+
+ dfix_event = dfix_raw;
+ dfix_event.header = ptu_dfix_header_event;
+
+ dfix_event_psb = dfix_raw;
+ dfix_event_psb.header = ptu_dfix_header_event_psb;
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ init_fixtures();
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, indir_not_synced, dfix_raw);
+ ptu_run_f(suite, cond_not_synced, dfix_raw);
+ ptu_run_f(suite, event_not_synced, dfix_raw);
+
+ ptu_run_f(suite, sync_backward, dfix_raw);
+ ptu_run_f(suite, sync_backward_empty_end, dfix_raw);
+ ptu_run_f(suite, sync_backward_empty_mid, dfix_raw);
+ ptu_run_f(suite, sync_backward_empty_begin, dfix_raw);
+ ptu_run_f(suite, decode_sync_backward, dfix_raw);
+
+ ptu_run_f(suite, indir_null, dfix_empty);
+ ptu_run_f(suite, indir_empty, dfix_empty);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_full);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_full);
+ ptu_run_f(suite, indir_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_tnt_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_empty);
+
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_16);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_32);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_48);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_full);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_16);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_32);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_48);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_full);
+ ptu_run_f(suite, indir_cutoff_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_tnt_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_indir);
+
+ ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_full);
+ ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_full);
+ ptu_run_f(suite, indir_cutoff_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_tnt_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_indir_psb);
+
+ ptu_run_f(suite, cond_null, dfix_empty);
+ ptu_run_f(suite, cond_empty, dfix_empty);
+ ptu_run_f(suite, cond, dfix_empty);
+ ptu_run_f(suite, cond_skip_tip_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_tip_pge_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_tip_pgd_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_fup_tip_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_fup_tip_pgd_fail, dfix_empty);
+
+ ptu_run_f(suite, cond, dfix_cond);
+ ptu_run_f(suite, cond_skip_tip_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_tip_pge_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_tip_pgd_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_fup_tip_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_fup_tip_pgd_fail, dfix_cond);
+
+ ptu_run_f(suite, event_null, dfix_empty);
+ ptu_run_f(suite, event_bad_size, dfix_empty);
+ ptu_run_f(suite, event_small_size, dfix_empty);
+ ptu_run_f(suite, event_big_size, dfix_empty);
+ ptu_run_f(suite, event_empty, dfix_empty);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_enabled_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_disabled_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_suppressed,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_16,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_32,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_48,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_empty);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_empty);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_empty);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_empty);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_empty);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_empty);
+ ptu_run_fp(suite, event_paging, dfix_empty, 0, 0);
+ ptu_run_fp(suite, event_paging, dfix_empty, pt_pl_pip_nr, 0);
+ ptu_run_f(suite, event_paging_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_async_paging, dfix_empty, 0, 0);
+ ptu_run_fp(suite, event_async_paging, dfix_empty, pt_pl_pip_nr, 0);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_empty, 0, 0);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_empty,
+ pt_pl_pip_nr, 0);
+ ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_full, 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty,
+ pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_16,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_32,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_48,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_sext_48,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_full,
+ 0);
+ ptu_run_f(suite, event_overflow_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_stop, dfix_empty, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_suppressed,
+ 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, pt_ipc_sext_48,
+ 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, pt_ipc_full,
+ 0);
+ ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_suppressed,
+ pt_mob_tsx_intx, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_16, 0, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_32,
+ pt_mob_tsx_intx, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_48,
+ pt_mob_tsx_intx, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_sext_48, 0, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_full, 0, 0);
+ ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, event_tsx_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, event_skip_tip_fail, dfix_empty);
+ ptu_run_f(suite, event_skip_tnt_8_fail, dfix_empty);
+ ptu_run_f(suite, event_skip_tnt_64_fail, dfix_empty);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_full);
+ ptu_run_f(suite, sync_event_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, sync_event_incomplete_fail, dfix_empty);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_full);
+ ptu_run_f(suite, sync_ovf_event_cutoff_fail, dfix_empty);
+
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_enabled_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_disabled_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_event);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_event);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_event);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_event);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_event);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_event);
+ ptu_run_fp(suite, event_paging, dfix_event, 0, 0x1000);
+ ptu_run_fp(suite, event_paging, dfix_event, pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_paging_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_async_paging, dfix_event, 0, 0x1000);
+ ptu_run_fp(suite, event_async_paging, dfix_event, pt_pl_pip_nr, 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event, 0, 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event,
+ pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_overflow_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_stop, dfix_event, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_event);
+ ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_suppressed, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_16,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_32, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_48, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_sext_48,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_full,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_event);
+ ptu_run_f(suite, event_tsx_cutoff_fail, dfix_event);
+ ptu_run_f(suite, event_skip_tip_fail, dfix_event);
+ ptu_run_f(suite, event_skip_tnt_8_fail, dfix_event);
+ ptu_run_f(suite, event_skip_tnt_64_fail, dfix_event);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_full);
+ ptu_run_f(suite, sync_event_cutoff_fail, dfix_event);
+ ptu_run_f(suite, sync_event_incomplete_fail, dfix_event);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_full);
+ ptu_run_f(suite, sync_ovf_event_cutoff_fail, dfix_event);
+
+ ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_enabled_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_disabled_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_event_psb);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_event_psb);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_event_psb);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_event_psb);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_event_psb);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_event_psb);
+ ptu_run_fp(suite, event_paging, dfix_event_psb, 0, 0x1000);
+ ptu_run_fp(suite, event_paging, dfix_event_psb, pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_paging_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_async_paging, dfix_event_psb, 0, 0x1000);
+ ptu_run_fp(suite, event_async_paging, dfix_event_psb, pt_pl_pip_nr,
+ 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event_psb, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event_psb,
+ pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_overflow_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_stop, dfix_event_psb, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event_psb,
+ pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event_psb,
+ pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_suppressed, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_sext_48,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_full,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_tsx_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_skip_tip_fail, dfix_event_psb);
+ ptu_run_f(suite, event_skip_tnt_8_fail, dfix_event_psb);
+ ptu_run_f(suite, event_skip_tnt_64_fail, dfix_event_psb);
+
+ ptu_run_f(suite, time_null_fail, dfix_empty);
+ ptu_run_f(suite, time_initial, dfix_empty);
+ ptu_run_f(suite, time, dfix_empty);
+
+ ptu_run_f(suite, cbr_null, dfix_empty);
+ ptu_run_f(suite, cbr_initial, dfix_empty);
+ ptu_run_f(suite, cbr, dfix_empty);
+
+ ptu_run_f(suite, indir_cyc_cutoff, dfix_empty);
+ ptu_run_f(suite, cond_cyc_cutoff, dfix_empty);
+ ptu_run_f(suite, event_cyc_cutoff, dfix_empty);
+
+ return ptunit_report(&suite);
+}
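
The registrations above follow the ptunit conventions used throughout this import: ptu_run() runs a plain test, ptu_run_f() passes a fixture, and ptu_run_fp() passes a fixture plus extra parameters such as the IP compression mode and an address. As a minimal sketch only (the fixture type and test name here are hypothetical and not taken from ptunit-query.c), a parameterized test and its registration look roughly like this:

/* Hypothetical parameterized test: fixture pointer plus extra arguments,
 * matching the ptu_run_fp() calls above.
 */
static struct ptunit_result sample_ip_test(struct sample_fixture *dfix,
					   enum pt_ip_compression ipc,
					   uint64_t ip)
{
	(void) dfix;
	(void) ipc;

	ptu_uint_ne(ip, 0ull);

	return ptu_passed();
}

/* Inside main():
 *
 *	ptu_run_fp(suite, sample_ip_test, dfix, pt_ipc_full, 0x1000);
 */
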
diff --git a/libipt/test/src/ptunit-retstack.c b/libipt/test/src/ptunit-retstack.c
new file mode 100644
index 000000000000..743eee7b127d
--- /dev/null
+++ b/libipt/test/src/ptunit-retstack.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_retstack.h"
+
+#include "intel-pt.h"
+
+
+static struct ptunit_result init(void)
+{
+ struct pt_retstack retstack;
+ int status;
+
+ memset(&retstack, 0xcd, sizeof(retstack));
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_retstack_init(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_push(&retstack, 0x42ull);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(ip, 0x42ull);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_empty(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ ip = 0x42ull;
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, -pte_retstack_empty);
+ ptu_uint_eq(ip, 0x42ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_null(void)
+{
+ uint64_t ip;
+ int status;
+
+ ip = 0x42ull;
+ status = pt_retstack_pop(NULL, &ip);
+ ptu_int_eq(status, -pte_invalid);
+ ptu_uint_eq(ip, 0x42ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pop(void)
+{
+ struct pt_retstack retstack;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_push(&retstack, 0x42ull);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_pop(&retstack, NULL);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pop_empty(void)
+{
+ struct pt_retstack retstack;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_pop(&retstack, NULL);
+ ptu_int_eq(status, -pte_retstack_empty);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pop_null(void)
+{
+ int status;
+
+ status = pt_retstack_pop(NULL, NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result full(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip, idx;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ for (idx = 0; idx < pt_retstack_size; ++idx) {
+ status = pt_retstack_push(&retstack, idx);
+ ptu_int_eq(status, 0);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
+ for (idx = pt_retstack_size; idx > 0;) {
+ idx -= 1;
+
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(ip, idx);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overflow(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip, idx;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ for (idx = 0; idx <= pt_retstack_size; ++idx) {
+ status = pt_retstack_push(&retstack, idx);
+ ptu_int_eq(status, 0);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
+ for (idx = pt_retstack_size; idx > 0; --idx) {
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(ip, idx);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run(suite, init_null);
+ ptu_run(suite, query);
+ ptu_run(suite, query_empty);
+ ptu_run(suite, query_null);
+ ptu_run(suite, pop);
+ ptu_run(suite, pop_empty);
+ ptu_run(suite, pop_null);
+ ptu_run(suite, full);
+ ptu_run(suite, overflow);
+
+ return ptunit_report(&suite);
+}
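
The full() and overflow() tests above document the return stack's behaviour: it holds pt_retstack_size entries and, once full, silently drops the oldest entry rather than failing the push. A minimal caller-side sketch of the same API, using a hypothetical helper name:

#include "pt_retstack.h"
#include "intel-pt.h"

/* Pop a predicted return address; report a miss on an empty stack. */
static int predict_return(struct pt_retstack *retstack, uint64_t *ip)
{
	int status;

	status = pt_retstack_pop(retstack, ip);
	if (status == -pte_retstack_empty)
		return 0;	/* no prediction available */

	if (status < 0)
		return status;	/* pass on other errors, e.g. -pte_invalid */

	return 1;		/* *ip holds the predicted return address */
}
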
diff --git a/libipt/test/src/ptunit-section-file.c b/libipt/test/src/ptunit-section-file.c
new file mode 100644
index 000000000000..753de13a3d21
--- /dev/null
+++ b/libipt/test/src/ptunit-section-file.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+
+/* This is a variation of ptunit-section.c.
+ *
+ * We provide pt_section_map() et al. that are normally provided by mmap-based
+ * section implementations. Our implementation falls back to file-based
+ * sections so we're able to test them.
+ *
+ * The actual test is in ptunit-section.c.
+ */
+
+/* The file status used for detecting changes to a file between unmap and map.
+ *
+ * In our case, the changes always affect the size of the file.
+ */
+struct pt_file_status {
+ /* The size in bytes. */
+ long size;
+};
+
+int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename)
+{
+ struct pt_file_status *status;
+ FILE *file;
+ long size;
+ int errcode;
+
+ if (!pstatus || !psize)
+ return -pte_internal;
+
+ file = fopen(filename, "rb");
+ if (!file)
+ return -pte_bad_image;
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode) {
+ errcode = -pte_bad_image;
+ goto out_file;
+ }
+
+ size = ftell(file);
+ if (size < 0) {
+ errcode = -pte_bad_image;
+ goto out_file;
+ }
+
+ status = malloc(sizeof(*status));
+ if (!status) {
+ errcode = -pte_nomem;
+ goto out_file;
+ }
+
+ status->size = size;
+
+ *pstatus = status;
+ *psize = (uint64_t) size;
+
+ errcode = 0;
+
+out_file:
+ fclose(file);
+ return errcode;
+}
+
+static int pt_section_map_success(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_on_map(section);
+ if (status < 0) {
+ (void) pt_section_unmap(section);
+ return status;
+ }
+
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ struct pt_file_status *status;
+ const char *filename;
+ uint16_t mcount;
+ FILE *file;
+ long size;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+ if (mcount)
+ return pt_section_map_success(section);
+
+ if (section->mapping)
+ goto out_unlock;
+
+ filename = section->filename;
+ if (!filename)
+ goto out_unlock;
+
+ status = section->status;
+ if (!status)
+ goto out_unlock;
+
+ errcode = -pte_bad_image;
+ file = fopen(filename, "rb");
+ if (!file)
+ goto out_unlock;
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode) {
+ errcode = -pte_bad_image;
+ goto out_file;
+ }
+
+ errcode = -pte_bad_image;
+ size = ftell(file);
+ if (size < 0)
+ goto out_file;
+
+ if (size != status->size)
+ goto out_file;
+
+ /* We need to keep the file open on success. It will be closed when
+ * the section is unmapped.
+ */
+ errcode = pt_sec_file_map(section, file);
+ if (!errcode)
+ return pt_section_map_success(section);
+
+out_file:
+ fclose(file);
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
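
The shim above mirrors the change detection of the mmap-based implementations: pt_section_mk_status() records the file size, and pt_section_map() re-checks it so that a file that grew or shrank between unmap and map is rejected with -pte_bad_image (see map_change() in ptunit-section.c below). A caller-side sketch of handling that error, with a hypothetical helper that is not part of this commit:

/* Re-map a section; if the backing file changed, drop the stale section
 * and create a fresh one from the file's current contents.
 */
static int section_remap(struct pt_section **psection, const char *filename,
			 uint64_t offset, uint64_t size)
{
	int errcode;

	errcode = pt_section_map(*psection);
	if (errcode != -pte_bad_image)
		return errcode;

	(void) pt_section_put(*psection);

	*psection = pt_mk_section(filename, offset, size);
	if (!*psection)
		return -pte_bad_image;

	return pt_section_map(*psection);
}
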
diff --git a/libipt/test/src/ptunit-section.c b/libipt/test/src/ptunit-section.c
new file mode 100644
index 000000000000..058bf853589e
--- /dev/null
+++ b/libipt/test/src/ptunit-section.c
@@ -0,0 +1,1396 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit_threads.h"
+#include "ptunit_mkfile.h"
+
+#include "pt_section.h"
+#include "pt_block_cache.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+
+
+struct pt_image_section_cache {
+ int map;
+};
+
+extern int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section);
+extern int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t size);
+
+int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ if (!iscache)
+ return -pte_internal;
+
+ if (iscache->map <= 0)
+ return iscache->map;
+
+ /* Avoid recursion. */
+ iscache->map = 0;
+
+ return pt_section_map_share(section);
+}
+
+int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t size)
+{
+ uint64_t memsize;
+ int errcode;
+
+ if (!iscache)
+ return -pte_internal;
+
+ if (iscache->map <= 0)
+ return iscache->map;
+
+ /* Avoid recursion. */
+ iscache->map = 0;
+
+ errcode = pt_section_memsize(section, &memsize);
+ if (errcode < 0)
+ return errcode;
+
+ if (size != memsize)
+ return -pte_internal;
+
+ return pt_section_map_share(section);
+}
+
+struct pt_block_cache *pt_bcache_alloc(uint64_t nentries)
+{
+ struct pt_block_cache *bcache;
+
+ if (!nentries || (UINT32_MAX < nentries))
+ return NULL;
+
+ /* The cache is not really used by tests. It suffices to allocate only
+ * the cache struct with the single default entry.
+ *
+ * We still set the number of entries to the requested size.
+ */
+ bcache = malloc(sizeof(*bcache));
+ if (bcache)
+ bcache->nentries = (uint32_t) nentries;
+
+ return bcache;
+}
+
+void pt_bcache_free(struct pt_block_cache *bcache)
+{
+ free(bcache);
+}
+
+/* A test fixture providing a temporary file and an initially NULL section. */
+struct section_fixture {
+ /* Threading support. */
+ struct ptunit_thrd_fixture thrd;
+
+ /* A temporary file name. */
+ char *name;
+
+ /* That file, opened for writing. */
+ FILE *file;
+
+ /* The section. */
+ struct pt_section *section;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct section_fixture *);
+ struct ptunit_result (*fini)(struct section_fixture *);
+};
+
+enum {
+#if defined(FEATURE_THREADS)
+
+ num_threads = 4,
+
+#endif /* defined(FEATURE_THREADS) */
+
+ num_work = 0x4000
+};
+
+static struct ptunit_result sfix_write_aux(struct section_fixture *sfix,
+ const uint8_t *buffer, size_t size)
+{
+ size_t written;
+
+ written = fwrite(buffer, 1, size, sfix->file);
+ ptu_uint_eq(written, size);
+
+ fflush(sfix->file);
+
+ return ptu_passed();
+}
+
+#define sfix_write(sfix, buffer) \
+ ptu_check(sfix_write_aux, sfix, buffer, sizeof(buffer))
+
+static struct ptunit_result create(struct section_fixture *sfix)
+{
+ const char *name;
+ uint8_t bytes[] = { 0xcc, 0xcc, 0xcc, 0xcc, 0xcc };
+ uint64_t offset, size;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ name = pt_section_filename(sfix->section);
+ ptu_str_eq(name, sfix->name);
+
+ offset = pt_section_offset(sfix->section);
+ ptu_uint_eq(offset, 0x1ull);
+
+ size = pt_section_size(sfix->section);
+ ptu_uint_eq(size, 0x3ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result create_bad_offset(struct section_fixture *sfix)
+{
+ sfix->section = pt_mk_section(sfix->name, 0x10ull, 0x0ull);
+ ptu_null(sfix->section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result create_truncated(struct section_fixture *sfix)
+{
+ const char *name;
+ uint8_t bytes[] = { 0xcc, 0xcc, 0xcc, 0xcc, 0xcc };
+ uint64_t offset, size;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, UINT64_MAX);
+ ptu_ptr(sfix->section);
+
+ name = pt_section_filename(sfix->section);
+ ptu_str_eq(name, sfix->name);
+
+ offset = pt_section_offset(sfix->section);
+ ptu_uint_eq(offset, 0x1ull);
+
+ size = pt_section_size(sfix->section);
+ ptu_uint_eq(size, sizeof(bytes) - 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result create_empty(struct section_fixture *sfix)
+{
+ sfix->section = pt_mk_section(sfix->name, 0x0ull, 0x10ull);
+ ptu_null(sfix->section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result filename_null(void)
+{
+ const char *name;
+
+ name = pt_section_filename(NULL);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result size_null(void)
+{
+ uint64_t size;
+
+ size = pt_section_size(NULL);
+ ptu_uint_eq(size, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_null(struct section_fixture *sfix)
+{
+ uint64_t size;
+ int errcode;
+
+ errcode = pt_section_memsize(NULL, &size);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_memsize(sfix->section, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_memsize(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result offset_null(void)
+{
+ uint64_t offset;
+
+ offset = pt_section_offset(NULL);
+ ptu_uint_eq(offset, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result get_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_get(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result put_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_put(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section section;
+ int errcode;
+
+ errcode = pt_section_attach(NULL, &iscache);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_attach(&section, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_attach(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result detach_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section section;
+ int errcode;
+
+ errcode = pt_section_detach(NULL, &iscache);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(&section, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_map(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unmap_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_unmap(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cache_null(void)
+{
+ struct pt_block_cache *bcache;
+
+ bcache = pt_section_bcache(NULL);
+ ptu_null(bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result get_overflow(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->ucount = UINT16_MAX;
+
+ errcode = pt_section_get(sfix->section);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ sfix->section->ucount = 1;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_overflow(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->acount = UINT16_MAX;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ sfix->section->acount = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_bad_ucount(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->acount = 2;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, -pte_internal);
+
+ sfix->section->acount = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_change(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix_write(sfix, bytes);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_put(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_put(sfix->section);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unmap_nomap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_overflow(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->mcount = UINT16_MAX;
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ sfix->section->mcount = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result get_put(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_get(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_get(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_put(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_put(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_detach(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->ucount += 2;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ sfix->section->ucount -= 2;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_bad_iscache(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache, bad;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->ucount += 2;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_attach(sfix->section, &bad);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ sfix->section->ucount -= 2;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result detach_bad_iscache(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache, bad;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &bad);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_unmap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_map(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ iscache.map = 0;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(sfix->section->mcount, 2);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_bad_map(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ iscache.map = -pte_eos;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_eos);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_map_overflow(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ iscache.map = 1;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ sfix->section->mcount = UINT16_MAX - 1;
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[1]);
+ ptu_uint_eq(buffer[1], bytes[2]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, NULL, 1, 0x0ull);
+ ptu_int_eq(status, -pte_internal);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_read(NULL, buffer, 1, 0x0ull);
+ ptu_int_eq(status, -pte_internal);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_offset(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x1ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[2]);
+ ptu_uint_eq(buffer[1], bytes[3]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_truncated(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x2ull);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(buffer[0], bytes[3]);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_from_truncated(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x2ull, 0x10ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x1ull);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(buffer[0], bytes[3]);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomem(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 1, 0x3ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_overflow(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 1,
+ 0xffffffffffff0000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_overflow_32bit(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 1,
+ 0xff00000000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_read(sfix->section, buffer, 1, 0x0ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_unmap_map(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[1]);
+ ptu_uint_eq(buffer[1], bytes[2]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ memset(buffer, 0xcc, sizeof(buffer));
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[1]);
+ ptu_uint_eq(buffer[1], bytes[2]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static int worker_read(void *arg)
+{
+ struct section_fixture *sfix;
+ int it, errcode;
+
+ sfix = arg;
+ if (!sfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_work; ++it) {
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int read;
+
+ errcode = pt_section_get(sfix->section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_map(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+
+ read = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ if (read < 0)
+ goto out_unmap;
+
+ errcode = -pte_invalid;
+ if ((read != 2) || (buffer[0] != 0x2) || (buffer[1] != 0x4))
+ goto out_unmap;
+
+ errcode = pt_section_unmap(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+
+ errcode = pt_section_put(sfix->section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+
+out_unmap:
+ (void) pt_section_unmap(sfix->section);
+
+out_put:
+ (void) pt_section_put(sfix->section);
+ return errcode;
+}
+
+static int worker_bcache(void *arg)
+{
+ struct section_fixture *sfix;
+ int it, errcode;
+
+ sfix = arg;
+ if (!sfix)
+ return -pte_internal;
+
+ errcode = pt_section_get(sfix->section);
+ if (errcode < 0)
+ return errcode;
+
+ for (it = 0; it < num_work; ++it) {
+ struct pt_block_cache *bcache;
+
+ errcode = pt_section_map(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+
+ errcode = pt_section_request_bcache(sfix->section);
+ if (errcode < 0)
+ goto out_unmap;
+
+ bcache = pt_section_bcache(sfix->section);
+ if (!bcache) {
+ errcode = -pte_nomem;
+ goto out_unmap;
+ }
+
+ errcode = pt_section_unmap(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+ }
+
+ return pt_section_put(sfix->section);
+
+out_unmap:
+ (void) pt_section_unmap(sfix->section);
+
+out_put:
+ (void) pt_section_put(sfix->section);
+ return errcode;
+}
+
+static struct ptunit_result stress(struct section_fixture *sfix,
+ int (*worker)(void *))
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd;
+
+ for (thrd = 0; thrd < num_threads; ++thrd)
+ ptu_test(ptunit_thrd_create, &sfix->thrd, worker, sfix);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ errcode = worker(sfix);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_no_bcache(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ struct pt_block_cache *bcache;
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ bcache = pt_section_bcache(sfix->section);
+ ptu_null(bcache);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_alloc_free(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ struct pt_block_cache *bcache;
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ bcache = pt_section_bcache(sfix->section);
+ ptu_ptr(bcache);
+ ptu_uint_eq(bcache->nentries, sfix->section->size);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ bcache = pt_section_bcache(sfix->section);
+ ptu_null(bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_alloc_twice(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_alloc_nomap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_nomap(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(memsize, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_unmap(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(memsize, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_map_nobcache(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ memsize = 0xfefefefefefefefeull;
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_ge(memsize, 0ull);
+ ptu_uint_le(memsize, 0x2000ull);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_map_bcache(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_ge(memsize,
+ sfix->section->size * sizeof(struct pt_bcache_entry));
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_init(struct section_fixture *sfix)
+{
+ int errcode;
+
+ sfix->section = NULL;
+ sfix->file = NULL;
+ sfix->name = NULL;
+
+ errcode = ptunit_mkfile(&sfix->file, &sfix->name, "wb");
+ ptu_int_eq(errcode, 0);
+
+ ptu_test(ptunit_thrd_init, &sfix->thrd);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_fini(struct section_fixture *sfix)
+{
+ int thrd;
+
+ ptu_test(ptunit_thrd_fini, &sfix->thrd);
+
+ for (thrd = 0; thrd < sfix->thrd.nthreads; ++thrd)
+ ptu_int_eq(sfix->thrd.result[thrd], 0);
+
+ if (sfix->section) {
+ pt_section_put(sfix->section);
+ sfix->section = NULL;
+ }
+
+ if (sfix->file) {
+ fclose(sfix->file);
+ sfix->file = NULL;
+
+ if (sfix->name)
+ remove(sfix->name);
+ }
+
+ if (sfix->name) {
+ free(sfix->name);
+ sfix->name = NULL;
+ }
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct section_fixture sfix;
+ struct ptunit_suite suite;
+
+ sfix.init = sfix_init;
+ sfix.fini = sfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, create, sfix);
+ ptu_run_f(suite, create_bad_offset, sfix);
+ ptu_run_f(suite, create_truncated, sfix);
+ ptu_run_f(suite, create_empty, sfix);
+
+ ptu_run(suite, filename_null);
+ ptu_run(suite, offset_null);
+ ptu_run(suite, size_null);
+ ptu_run(suite, get_null);
+ ptu_run(suite, put_null);
+ ptu_run(suite, attach_null);
+ ptu_run(suite, detach_null);
+ ptu_run(suite, map_null);
+ ptu_run(suite, unmap_null);
+ ptu_run(suite, cache_null);
+
+ ptu_run_f(suite, get_overflow, sfix);
+ ptu_run_f(suite, attach_overflow, sfix);
+ ptu_run_f(suite, attach_bad_ucount, sfix);
+ ptu_run_f(suite, map_change, sfix);
+ ptu_run_f(suite, map_put, sfix);
+ ptu_run_f(suite, unmap_nomap, sfix);
+ ptu_run_f(suite, map_overflow, sfix);
+ ptu_run_f(suite, get_put, sfix);
+ ptu_run_f(suite, attach_detach, sfix);
+ ptu_run_f(suite, attach_bad_iscache, sfix);
+ ptu_run_f(suite, detach_bad_iscache, sfix);
+ ptu_run_f(suite, map_unmap, sfix);
+ ptu_run_f(suite, attach_map, sfix);
+ ptu_run_f(suite, attach_bad_map, sfix);
+ ptu_run_f(suite, attach_map_overflow, sfix);
+ ptu_run_f(suite, read, sfix);
+ ptu_run_f(suite, read_null, sfix);
+ ptu_run_f(suite, read_offset, sfix);
+ ptu_run_f(suite, read_truncated, sfix);
+ ptu_run_f(suite, read_from_truncated, sfix);
+ ptu_run_f(suite, read_nomem, sfix);
+ ptu_run_f(suite, read_overflow, sfix);
+ ptu_run_f(suite, read_overflow_32bit, sfix);
+ ptu_run_f(suite, read_nomap, sfix);
+ ptu_run_f(suite, read_unmap_map, sfix);
+
+ ptu_run_f(suite, init_no_bcache, sfix);
+ ptu_run_f(suite, bcache_alloc_free, sfix);
+ ptu_run_f(suite, bcache_alloc_twice, sfix);
+ ptu_run_f(suite, bcache_alloc_nomap, sfix);
+
+ ptu_run_f(suite, memsize_null, sfix);
+ ptu_run_f(suite, memsize_nomap, sfix);
+ ptu_run_f(suite, memsize_unmap, sfix);
+ ptu_run_f(suite, memsize_map_nobcache, sfix);
+ ptu_run_f(suite, memsize_map_bcache, sfix);
+
+ ptu_run_fp(suite, stress, sfix, worker_bcache);
+ ptu_run_fp(suite, stress, sfix, worker_read);
+
+ return ptunit_report(&suite);
+}
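
The section tests above revolve around three paired counters: get()/put() for object lifetime, map()/unmap() for the mapping count, and attach()/detach() for image section cache users; worker_read() shows the full read path and the stress tests run it from several threads. The same path as a standalone sketch, with a hypothetical helper name:

/* Read from a section: take a reference, map, read, then undo both. */
static int section_read_once(struct pt_section *section, uint8_t *buffer,
			     uint16_t size, uint64_t offset)
{
	int status, errcode;

	errcode = pt_section_get(section);
	if (errcode < 0)
		return errcode;

	status = pt_section_map(section);
	if (status < 0)
		goto out_put;

	status = pt_section_read(section, buffer, size, offset);

	errcode = pt_section_unmap(section);
	if (errcode < 0 && 0 <= status)
		status = errcode;

out_put:
	errcode = pt_section_put(section);
	if (errcode < 0 && 0 <= status)
		status = errcode;

	return status;
}
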
diff --git a/libipt/test/src/ptunit-sync.c b/libipt/test/src/ptunit-sync.c
new file mode 100644
index 000000000000..343f9d92886c
--- /dev/null
+++ b/libipt/test/src/ptunit-sync.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_sync.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+/* A test fixture for sync tests. */
+struct sync_fixture {
+ /* The trace buffer. */
+ uint8_t buffer[1024];
+
+ /* A trace configuration. */
+ struct pt_config config;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct sync_fixture *);
+ struct ptunit_result (*fini)(struct sync_fixture *);
+};
+
+static struct ptunit_result sfix_init(struct sync_fixture *sfix)
+{
+ memset(sfix->buffer, 0xcd, sizeof(sfix->buffer));
+
+ memset(&sfix->config, 0, sizeof(sfix->config));
+ sfix->config.size = sizeof(sfix->config);
+ sfix->config.begin = sfix->buffer;
+ sfix->config.end = sfix->buffer + sizeof(sfix->buffer);
+
+ return ptu_passed();
+}
+
+static void sfix_encode_psb(uint8_t *pos)
+{
+ int i;
+
+ *pos++ = pt_opc_psb;
+ *pos++ = pt_ext_psb;
+
+ for (i = 0; i < pt_psb_repeat_count; ++i) {
+ *pos++ = pt_psb_hi;
+ *pos++ = pt_psb_lo;
+ }
+}
+
+
+static struct ptunit_result sync_fwd_null(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_forward(NULL, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_forward(&sync, NULL, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_null(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_backward(NULL, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_backward(&sync, NULL, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_backward(&sync, sfix->config.begin, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_empty(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix->config.end = sfix->config.begin;
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_empty(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix->config.end = sfix->config.begin;
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_none(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_none(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_here(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_here(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.end - ptps_psb);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin + 0x23);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.begin + 0x23);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin + 0x23);
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.begin + 0x23);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_past(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin + ptps_psb,
+ &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_past(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+
+ errcode = pt_sync_backward(&sync, sfix->config.end - ptps_psb,
+ &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_cutoff(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+ sfix->config.begin += 1;
+ sfix->config.end -= 1;
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_cutoff(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+ sfix->config.begin += 1;
+ sfix->config.end -= 1;
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct sync_fixture sfix;
+ struct ptunit_suite suite;
+
+ sfix.init = sfix_init;
+ sfix.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, sync_fwd_null, sfix);
+ ptu_run_f(suite, sync_bwd_null, sfix);
+
+ ptu_run_f(suite, sync_fwd_empty, sfix);
+ ptu_run_f(suite, sync_bwd_empty, sfix);
+
+ ptu_run_f(suite, sync_fwd_none, sfix);
+ ptu_run_f(suite, sync_bwd_none, sfix);
+
+ ptu_run_f(suite, sync_fwd_here, sfix);
+ ptu_run_f(suite, sync_bwd_here, sfix);
+
+ ptu_run_f(suite, sync_fwd, sfix);
+ ptu_run_f(suite, sync_bwd, sfix);
+
+ ptu_run_f(suite, sync_fwd_past, sfix);
+ ptu_run_f(suite, sync_bwd_past, sfix);
+
+ ptu_run_f(suite, sync_fwd_cutoff, sfix);
+ ptu_run_f(suite, sync_bwd_cutoff, sfix);
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-time.c b/libipt/test/src/ptunit-time.c
new file mode 100644
index 000000000000..5beb623c20bc
--- /dev/null
+++ b/libipt/test/src/ptunit-time.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_time.h"
+
+#include "intel-pt.h"
+
+#include "ptunit.h"
+
+
+/* A time unit test fixture. */
+
+struct time_fixture {
+ /* The configuration to use. */
+ struct pt_config config;
+
+ /* The calibration to use. */
+ struct pt_time_cal tcal;
+
+ /* The time struct to update. */
+ struct pt_time time;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct time_fixture *);
+ struct ptunit_result (*fini)(struct time_fixture *);
+};
+
+static struct ptunit_result tfix_init(struct time_fixture *tfix)
+{
+ memset(&tfix->config, 0, sizeof(tfix->config));
+ tfix->config.size = sizeof(tfix->config);
+ tfix->config.cpuid_0x15_eax = 2;
+ tfix->config.cpuid_0x15_ebx = 1;
+ tfix->config.mtc_freq = 4;
+
+ pt_tcal_init(&tfix->tcal);
+ pt_tcal_set_fcr(&tfix->tcal, 0x2ull << pt_tcal_fcr_shr);
+
+ pt_time_init(&tfix->time);
+
+ return ptu_passed();
+}
+
+
+static struct ptunit_result tsc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_tsc packet;
+ int errcode;
+
+ errcode = pt_time_update_tsc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_tsc(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cbr packet;
+ int errcode;
+
+ errcode = pt_time_update_cbr(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_cbr(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma_null(struct time_fixture *tfix)
+{
+ struct pt_packet_tma packet;
+ int errcode;
+
+ errcode = pt_time_update_tma(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_tma(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_tma(&tfix->time, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mtc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_mtc packet;
+ int errcode;
+
+ errcode = pt_time_update_mtc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_mtc(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_mtc(&tfix->time, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cyc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cyc packet;
+ int errcode;
+
+ errcode = pt_time_update_cyc(NULL, &packet, &tfix->config, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_cyc(&tfix->time, NULL, &tfix->config, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_cyc(&tfix->time, &packet, NULL, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_tsc_null(struct time_fixture *tfix)
+{
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_time_query_tsc(NULL, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_tsc_none(struct time_fixture *tfix)
+{
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_cbr_null(struct time_fixture *tfix)
+{
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_time_query_cbr(NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_query_cbr(&cbr, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_cbr_none(struct time_fixture *tfix)
+{
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_time_query_cbr(&cbr, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_cbr);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tcal_cbr_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cbr packet;
+ int errcode;
+
+ errcode = pt_tcal_update_cbr(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tcal_mtc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_mtc packet;
+ int errcode;
+
+ errcode = pt_tcal_update_mtc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_tcal_update_mtc(&tfix->tcal, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_tcal_update_mtc(&tfix->tcal, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tcal_cyc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cyc packet;
+ int errcode;
+
+ errcode = pt_tcal_update_cyc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_tcal_update_cyc(&tfix->tcal, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tsc(struct time_fixture *tfix)
+{
+ struct pt_packet_tsc packet;
+ uint64_t tsc;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ packet.tsc = 0xdedededeull;
+
+ errcode = pt_time_update_tsc(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_tsc(&tsc, &lost_mtc, &lost_cyc, &tfix->time);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(tsc, 0xdedededeull);
+ ptu_uint_eq(lost_mtc, 0);
+ ptu_uint_eq(lost_cyc, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr(struct time_fixture *tfix)
+{
+ struct pt_packet_cbr packet;
+ uint32_t cbr;
+ int errcode;
+
+ packet.ratio = 0x38;
+
+ errcode = pt_time_update_cbr(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_cbr(&cbr, &tfix->time);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(cbr, 0x38);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma(struct time_fixture *tfix)
+{
+ struct pt_packet_tma packet;
+ int errcode;
+
+ packet.ctc = 0xdc;
+ packet.fc = 0xf;
+
+ errcode = pt_time_update_tma(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_bad_context);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mtc(struct time_fixture *tfix)
+{
+ struct pt_packet_mtc packet;
+ uint64_t tsc;
+ int errcode;
+
+ packet.ctc = 0xdc;
+
+ errcode = pt_time_update_mtc(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cyc(struct time_fixture *tfix)
+{
+ struct pt_packet_cyc packet;
+ uint64_t fcr, tsc;
+ int errcode;
+
+ errcode = pt_tcal_fcr(&fcr, &tfix->tcal);
+ ptu_int_eq(errcode, 0);
+
+ packet.value = 0xdc;
+
+ errcode = pt_time_update_cyc(&tfix->time, &packet, &tfix->config, fcr);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+ struct time_fixture tfix;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ tfix.init = tfix_init;
+ tfix.fini = NULL;
+
+ ptu_run_f(suite, tsc_null, tfix);
+ ptu_run_f(suite, cbr_null, tfix);
+ ptu_run_f(suite, tma_null, tfix);
+ ptu_run_f(suite, mtc_null, tfix);
+ ptu_run_f(suite, cyc_null, tfix);
+
+ ptu_run_f(suite, query_tsc_null, tfix);
+ ptu_run_f(suite, query_tsc_none, tfix);
+ ptu_run_f(suite, query_cbr_null, tfix);
+ ptu_run_f(suite, query_cbr_none, tfix);
+
+ ptu_run_f(suite, tcal_cbr_null, tfix);
+ ptu_run_f(suite, tcal_mtc_null, tfix);
+ ptu_run_f(suite, tcal_cyc_null, tfix);
+
+ ptu_run_f(suite, tsc, tfix);
+ ptu_run_f(suite, cbr, tfix);
+ ptu_run_f(suite, tma, tfix);
+ ptu_run_f(suite, mtc, tfix);
+ ptu_run_f(suite, cyc, tfix);
+
+ /* The bulk is covered in ptt tests. */
+
+ return ptunit_report(&suite);
+}
diff --git a/libipt/test/src/ptunit-tnt_cache.c b/libipt/test/src/ptunit-tnt_cache.c
new file mode 100644
index 000000000000..56631ca04f4f
--- /dev/null
+++ b/libipt/test/src/ptunit-tnt_cache.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_tnt_cache.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+static struct ptunit_result init(void)
+{
+ struct pt_tnt_cache tnt_cache;
+
+ memset(&tnt_cache, 0xcd, sizeof(tnt_cache));
+
+ pt_tnt_cache_init(&tnt_cache);
+
+ ptu_uint_eq(tnt_cache.tnt, 0ull);
+ ptu_uint_eq(tnt_cache.index, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_tnt_cache_init(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_initial(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ pt_tnt_cache_init(&tnt_cache);
+
+ status = pt_tnt_cache_is_empty(&tnt_cache);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_no(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.index = 1ull;
+
+ status = pt_tnt_cache_is_empty(&tnt_cache);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_yes(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.index = 0ull;
+
+ status = pt_tnt_cache_is_empty(&tnt_cache);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_null(void)
+{
+ int status;
+
+ status = pt_tnt_cache_is_empty(NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_taken(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.tnt = 1ull;
+ tnt_cache.index = 1ull;
+
+ status = pt_tnt_cache_query(&tnt_cache);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(tnt_cache.index, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_not_taken(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.tnt = 0ull;
+ tnt_cache.index = 1ull;
+
+ status = pt_tnt_cache_query(&tnt_cache);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(tnt_cache.index, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_empty(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.index = 0ull;
+
+ status = pt_tnt_cache_query(&tnt_cache);
+ ptu_int_eq(status, -pte_bad_query);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_null(void)
+{
+ int status;
+
+ status = pt_tnt_cache_query(NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ struct pt_packet_tnt packet;
+ int errcode;
+
+ pt_tnt_cache_init(&tnt_cache);
+
+ packet.bit_size = 4ull;
+ packet.payload = 8ull;
+
+ errcode = pt_tnt_cache_update_tnt(&tnt_cache, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(tnt_cache.tnt, 8ull);
+ ptu_uint_eq(tnt_cache.index, 1ull << 3);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt_not_empty(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ struct pt_packet_tnt packet;
+ int errcode;
+
+ tnt_cache.tnt = 42ull;
+ tnt_cache.index = 12ull;
+
+ errcode = pt_tnt_cache_update_tnt(&tnt_cache, &packet, NULL);
+ ptu_int_eq(errcode, -pte_bad_context);
+ ptu_uint_eq(tnt_cache.tnt, 42ull);
+ ptu_uint_eq(tnt_cache.index, 12ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt_null_tnt(void)
+{
+ struct pt_packet_tnt packet;
+ int errcode;
+
+ errcode = pt_tnt_cache_update_tnt(NULL, &packet, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt_null_packet(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int errcode;
+
+ tnt_cache.tnt = 42ull;
+ tnt_cache.index = 12ull;
+
+ errcode = pt_tnt_cache_update_tnt(&tnt_cache, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_uint_eq(tnt_cache.tnt, 42ull);
+ ptu_uint_eq(tnt_cache.index, 12ull);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run(suite, init_null);
+ ptu_run(suite, is_empty_initial);
+ ptu_run(suite, is_empty_no);
+ ptu_run(suite, is_empty_yes);
+ ptu_run(suite, is_empty_null);
+ ptu_run(suite, query_taken);
+ ptu_run(suite, query_not_taken);
+ ptu_run(suite, query_empty);
+ ptu_run(suite, query_null);
+ ptu_run(suite, update_tnt);
+ ptu_run(suite, update_tnt_not_empty);
+ ptu_run(suite, update_tnt_null_tnt);
+ ptu_run(suite, update_tnt_null_packet);
+
+ return ptunit_report(&suite);
+}
diff --git a/pevent/CMakeLists.txt b/pevent/CMakeLists.txt
new file mode 100644
index 000000000000..3be33c526c83
--- /dev/null
+++ b/pevent/CMakeLists.txt
@@ -0,0 +1,35 @@
+# Copyright (c) 2014-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+add_library(pevent STATIC
+ src/pevent.c
+)
+
+set_target_properties(pevent PROPERTIES
+ POSITION_INDEPENDENT_CODE TRUE
+)
+
+add_ptunit_c_test(pevent src/pevent.c)
diff --git a/pevent/include/pevent.h b/pevent/include/pevent.h
new file mode 100644
index 000000000000..197c1822cbef
--- /dev/null
+++ b/pevent/include/pevent.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PEVENT_H
+#define PEVENT_H
+
+#include <linux/perf_event.h>
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+
+
+/* A perf event configuration. */
+struct pev_config {
+ /* The size of the config structure in bytes. */
+ size_t size;
+
+ /* The respective field in struct perf_event_attr.
+ *
+ * We require sample_id_all in struct perf_event_attr to be set.
+ */
+ uint64_t sample_type;
+
+ /* The respective fields in struct perf_event_mmap_page. */
+ uint16_t time_shift;
+ uint32_t time_mult;
+ uint64_t time_zero;
+};
+
+static inline void pev_config_init(struct pev_config *config)
+{
+ memset(config, 0, sizeof(*config));
+ config->size = sizeof(*config);
+}
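+
+/* A minimal usage sketch, assuming @attr is the struct perf_event_attr the
+ * trace was recorded with and @pc points to the corresponding struct
+ * perf_event_mmap_page:
+ *
+ *	struct pev_config config;
+ *
+ *	pev_config_init(&config);
+ *	config.sample_type = attr.sample_type;
+ *	config.time_shift = pc->time_shift;
+ *	config.time_mult = pc->time_mult;
+ *	config.time_zero = pc->time_zero;
+ */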
+
+
+/* The MMAP perf event record. */
+struct pev_record_mmap {
+ uint32_t pid, tid;
+ uint64_t addr;
+ uint64_t len;
+ uint64_t pgoff;
+ char filename[];
+};
+
+/* The LOST perf event record. */
+struct pev_record_lost {
+ uint64_t id;
+ uint64_t lost;
+};
+
+/* The COMM perf event record. */
+struct pev_record_comm {
+ uint32_t pid, tid;
+ char comm[];
+};
+
+/* The EXIT perf event record. */
+struct pev_record_exit {
+ uint32_t pid, ppid;
+ uint32_t tid, ptid;
+ uint64_t time;
+};
+
+/* The THROTTLE and UNTHROTTLE perf event records. */
+struct pev_record_throttle {
+ uint64_t time;
+ uint64_t id;
+ uint64_t stream_id;
+};
+
+/* The FORK perf event record. */
+struct pev_record_fork {
+ uint32_t pid, ppid;
+ uint32_t tid, ptid;
+ uint64_t time;
+};
+
+/* The MMAP2 perf event record. */
+struct pev_record_mmap2 {
+ uint32_t pid, tid;
+ uint64_t addr;
+ uint64_t len;
+ uint64_t pgoff;
+ uint32_t maj, min;
+ uint64_t ino;
+ uint64_t ino_generation;
+ uint32_t prot, flags;
+ char filename[];
+};
+
+/* The AUX perf event record. */
+struct pev_record_aux {
+ uint64_t aux_offset;
+ uint64_t aux_size;
+ uint64_t flags;
+};
+
+/* The ITRACE_START perf event record. */
+struct pev_record_itrace_start {
+ uint32_t pid, tid;
+};
+
+/* The LOST_SAMPLES perf event record. */
+struct pev_record_lost_samples {
+ uint64_t lost;
+};
+
+/* The SWITCH_CPU_WIDE perf event record. */
+struct pev_record_switch_cpu_wide {
+ uint32_t next_prev_pid;
+ uint32_t next_prev_tid;
+};
+
+/* A perf event record. */
+struct pev_event {
+ /* The record type (enum perf_event_type). */
+ uint32_t type;
+
+ /* The misc field of the perf event header. */
+ uint16_t misc;
+
+ /* The perf event record. */
+ union {
+ /* @type = PERF_RECORD_MMAP. */
+ const struct pev_record_mmap *mmap;
+
+ /* @type = PERF_RECORD_LOST. */
+ const struct pev_record_lost *lost;
+
+ /* @type = PERF_RECORD_COMM. */
+ const struct pev_record_comm *comm;
+
+ /* @type = PERF_RECORD_EXIT. */
+ const struct pev_record_exit *exit;
+
+ /* @type = PERF_RECORD_(UN)THROTTLE. */
+ const struct pev_record_throttle *throttle;
+
+ /* @type = PERF_RECORD_FORK. */
+ const struct pev_record_fork *fork;
+
+ /* @type = PERF_RECORD_MMAP2. */
+ const struct pev_record_mmap2 *mmap2;
+
+ /* @type = PERF_RECORD_AUX. */
+ const struct pev_record_aux *aux;
+
+ /* @type = PERF_RECORD_ITRACE_START. */
+ const struct pev_record_itrace_start *itrace_start;
+
+ /* @type = PERF_RECORD_LOST_SAMPLES. */
+ const struct pev_record_lost_samples *lost_samples;
+
+ /* @type = PERF_RECORD_SWITCH_CPU_WIDE. */
+ const struct pev_record_switch_cpu_wide *switch_cpu_wide;
+ } record;
+
+ /* The additional samples. */
+ struct {
+ /* The sampled pid and tid. */
+ const uint32_t *pid;
+ const uint32_t *tid;
+
+ /* The sampled time in perf_event format. */
+ const uint64_t *time;
+
+ /* The sampled time in TSC format - if @time is not NULL. */
+ uint64_t tsc;
+
+ /* The sampled id. */
+ const uint64_t *id;
+
+ /* The sampled stream id. */
+ const uint64_t *stream_id;
+
+ /* The sampled cpu. */
+ const uint32_t *cpu;
+
+ /* The sample identifier. */
+ const uint64_t *identifier;
+ } sample;
+};
+
+static inline void pev_event_init(struct pev_event *event)
+{
+ memset(event, 0, sizeof(*event));
+}
+
+/* Convert perf_event time to TSC.
+ *
+ * Converts @time in perf_event format to @tsc.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_config if @config->size is too small.
+ * Returns -pte_bad_config if @config->time_mult is zero.
+ * Returns -pte_internal if @tsc or @config is NULL.
+ */
+extern int pev_time_to_tsc(uint64_t *tsc, uint64_t time,
+ const struct pev_config *config);
+
+/* Convert TSC to perf_event time.
+ *
+ * Converts @tsc into @time in perf_event format.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_config if @config->size is too small.
+ * Returns -pte_bad_config if @config->time_mult is zero.
+ * Returns -pte_internal if @time or @config is NULL.
+ */
+extern int pev_time_from_tsc(uint64_t *time, uint64_t tsc,
+ const struct pev_config *config);
+
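+/* Both conversions follow the clock relation that the kernel publishes in
+ * struct perf_event_mmap_page:
+ *
+ *	tsc  = ((time - time_zero) << time_shift) / time_mult
+ *	time = time_zero + ((tsc * time_mult) >> time_shift)
+ *
+ * For example, with time_shift = 4, time_mult = 3, and time_zero = 0xa00b00
+ * (the values used in the unit tests), time 0xa00b43 converts to tsc 0x165
+ * and tsc 0x23b converts back to time 0xa00b6b.
+ */
+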
+/* Read a perf_event record.
+ *
+ * Reads one perf_event record from [@begin; @end[ into @event.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_bad_config if @config->size is too small.
+ * Returns -pte_eos if the event does not fit into [@begin; @end[.
+ * Returns -pte_internal if @event, @config, @begin, or @end is NULL.
+ */
+extern int pev_read(struct pev_event *event, const uint8_t *begin,
+ const uint8_t *end, const struct pev_config *config);
+
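+/* A minimal sketch of iterating over perf_event records in [@begin; @end[,
+ * assuming @config has been initialized as above:
+ *
+ *	const uint8_t *pos = begin;
+ *
+ *	while (pos < end) {
+ *		struct pev_event event;
+ *		int size;
+ *
+ *		size = pev_read(&event, pos, end, &config);
+ *		if (size < 0)
+ *			break;
+ *
+ *		pos += size;
+ *	}
+ */
+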
+/* Write a perf_event record.
+ *
+ * Writes @event into [@begin; @end[.
+ *
+ * Returns the number of bytes written on success, a negative error code
+ * otherwise.
+ * Returns -pte_bad_config if @config->size is too small.
+ * Returns -pte_bad_opc if the event type is not known.
+ * Returns -pte_eos if the event does not fit into [@begin; @end[.
+ * Returns -pte_internal if @begin, @end, @event, or @config is NULL.
+ */
+extern int pev_write(const struct pev_event *event, uint8_t *begin,
+ uint8_t *end, const struct pev_config *config);
+
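+/* A minimal sketch of writing a synthetic EXIT record, assuming @begin,
+ * @end, and @config exist as above:
+ *
+ *	struct pev_record_exit exit_record;
+ *	struct pev_event event;
+ *	int size;
+ *
+ *	memset(&exit_record, 0, sizeof(exit_record));
+ *
+ *	pev_event_init(&event);
+ *	event.type = PERF_RECORD_EXIT;
+ *	event.record.exit = &exit_record;
+ *
+ *	size = pev_write(&event, begin, end, &config);
+ */
+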
+#endif /* PEVENT_H */
diff --git a/pevent/src/pevent.c b/pevent/src/pevent.c
new file mode 100644
index 000000000000..f8a74341c16f
--- /dev/null
+++ b/pevent/src/pevent.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pevent.h"
+
+
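+/* Check whether @config provides @field.
+ *
+ * Callers may pass a pev_config that was compiled against an older, smaller
+ * version of the struct; its size field tells us which trailing members are
+ * actually present.
+ */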
+#define pev_config_has(config, field) \
+ (config->size >= (offsetof(struct pev_config, field) + \
+ sizeof(config->field)))
+
+int pev_time_to_tsc(uint64_t *tsc, uint64_t time,
+ const struct pev_config *config)
+{
+ uint64_t quot, rem, time_zero;
+ uint16_t time_shift;
+ uint32_t time_mult;
+
+ if (!tsc || !config)
+ return -pte_internal;
+
+ if (!pev_config_has(config, time_zero))
+ return -pte_bad_config;
+
+ time_shift = config->time_shift;
+ time_mult = config->time_mult;
+ time_zero = config->time_zero;
+
+ if (!time_mult)
+ return -pte_bad_config;
+
+ time -= time_zero;
+
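+	/* Compute ((time - time_zero) << time_shift) / time_mult in two parts
+	 * to reduce the risk of overflowing the intermediate shift.
+	 */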
+ quot = time / time_mult;
+ rem = time % time_mult;
+
+ quot <<= time_shift;
+ rem <<= time_shift;
+ rem /= time_mult;
+
+ *tsc = quot + rem;
+
+ return 0;
+}
+
+int pev_time_from_tsc(uint64_t *time, uint64_t tsc,
+ const struct pev_config *config)
+{
+ uint64_t quot, rem, time_zero;
+ uint16_t time_shift;
+ uint32_t time_mult;
+
+ if (!time || !config)
+ return -pte_internal;
+
+ if (!pev_config_has(config, time_zero))
+ return -pte_bad_config;
+
+ time_shift = config->time_shift;
+ time_mult = config->time_mult;
+ time_zero = config->time_zero;
+
+ if (!time_mult)
+ return -pte_bad_config;
+
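+	/* Compute time_zero + ((tsc * time_mult) >> time_shift), again split
+	 * into quotient and remainder to avoid overflowing the product.
+	 */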
+ quot = tsc >> time_shift;
+ rem = tsc & ((1ull << time_shift) - 1);
+
+ quot *= time_mult;
+ rem *= time_mult;
+ rem >>= time_shift;
+
+ *time = time_zero + quot + rem;
+
+ return 0;
+}
+
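+/* Determine the length of a zero-terminated string in [@begin; @end_arg[.
+ *
+ * Returns the string length including the terminating zero on success,
+ * -pte_bad_packet if the string is not terminated before @end_arg,
+ * -pte_internal if @begin or @end_arg is NULL or out of order.
+ */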
+static int pev_strlen(const char *begin, const void *end_arg)
+{
+ const char *pos, *end;
+
+ if (!begin || !end_arg)
+ return -pte_internal;
+
+ end = (const char *) end_arg;
+ if (end < begin)
+ return -pte_internal;
+
+ for (pos = begin; pos < end; ++pos) {
+ if (!pos[0])
+ return (int) (pos - begin) + 1;
+ }
+
+ return -pte_bad_packet;
+}
+
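+/* Read the trailing samples of a perf_event record.
+ *
+ * Sets pointers in @event->sample into the buffer at @begin according to the
+ * sample_type bits in @config and, if a time sample is present, converts it
+ * to TSC format.
+ *
+ * Returns the number of bytes consumed on success, a negative error code
+ * otherwise.
+ */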
+static int pev_read_samples(struct pev_event *event, const uint8_t *begin,
+ const uint8_t *end, const struct pev_config *config)
+{
+ const uint8_t *pos;
+ uint64_t sample_type;
+
+ if (!event || !begin || !config)
+ return -pte_internal;
+
+ if (!pev_config_has(config, sample_type))
+ return -pte_bad_config;
+
+ sample_type = config->sample_type;
+ pos = begin;
+
+ if (sample_type & PERF_SAMPLE_TID) {
+ event->sample.pid = (const uint32_t *) &pos[0];
+ event->sample.tid = (const uint32_t *) &pos[4];
+ pos += 8;
+ }
+
+ if (sample_type & PERF_SAMPLE_TIME) {
+ int errcode;
+
+ event->sample.time = (const uint64_t *) pos;
+ pos += 8;
+
+		/* The time sample is dereferenced right away, unlike the
+		 * other samples, so make sure the pointer still lies inside
+		 * the buffer.
+		 */
+ if (end < pos)
+ return -pte_nosync;
+
+ errcode = pev_time_to_tsc(&event->sample.tsc,
+ *event->sample.time, config);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ if (sample_type & PERF_SAMPLE_ID) {
+ event->sample.id = (const uint64_t *) pos;
+ pos += 8;
+ }
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID) {
+ event->sample.stream_id = (const uint64_t *) pos;
+ pos += 8;
+ }
+
+ if (sample_type & PERF_SAMPLE_CPU) {
+ event->sample.cpu = (const uint32_t *) pos;
+ pos += 8;
+ }
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER) {
+ event->sample.identifier = (const uint64_t *) pos;
+ pos += 8;
+ }
+
+ return (int) (pos - begin);
+}
+
+int pev_read(struct pev_event *event, const uint8_t *begin, const uint8_t *end,
+ const struct pev_config *config)
+{
+ const struct perf_event_header *header;
+ const uint8_t *pos;
+ int size;
+
+ if (!event || !begin || end < begin)
+ return -pte_internal;
+
+ pos = begin;
+ if (end < (pos + sizeof(*header)))
+ return -pte_eos;
+
+ header = (const struct perf_event_header *) pos;
+ pos += sizeof(*header);
+
+ if (!header->type || (end < (begin + header->size)))
+ return -pte_eos;
+
+ /* Stay within the packet. */
+ end = begin + header->size;
+
+ memset(event, 0, sizeof(*event));
+
+ event->type = header->type;
+ event->misc = header->misc;
+
+ switch (event->type) {
+ default:
+		/* We do not provide samples for unknown record types.
+		 *
+		 * It would be possible, since we know the event's total size
+		 * as well as the sample size, but there is little use for
+		 * samples without the record they belong to.
+		 */
+ return (int) header->size;
+
+ case PERF_RECORD_MMAP: {
+ int slen;
+
+ event->record.mmap = (const struct pev_record_mmap *) pos;
+
+ slen = pev_strlen(event->record.mmap->filename, end);
+ if (slen < 0)
+ return slen;
+
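+		/* Strings in perf_event records are padded to a multiple of
+		 * eight bytes.
+		 */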
+ slen = (slen + 7) & ~7;
+
+ pos += sizeof(*event->record.mmap);
+ pos += slen;
+ }
+ break;
+
+ case PERF_RECORD_LOST:
+ event->record.lost = (const struct pev_record_lost *) pos;
+ pos += sizeof(*event->record.lost);
+ break;
+
+ case PERF_RECORD_COMM: {
+ int slen;
+
+ event->record.comm = (const struct pev_record_comm *) pos;
+
+ slen = pev_strlen(event->record.comm->comm, end);
+ if (slen < 0)
+ return slen;
+
+ slen = (slen + 7) & ~7;
+
+ pos += sizeof(*event->record.comm);
+ pos += slen;
+ }
+ break;
+
+ case PERF_RECORD_EXIT:
+ event->record.exit = (const struct pev_record_exit *) pos;
+ pos += sizeof(*event->record.exit);
+ break;
+
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
+ event->record.throttle =
+ (const struct pev_record_throttle *) pos;
+ pos += sizeof(*event->record.throttle);
+ break;
+
+ case PERF_RECORD_FORK:
+ event->record.fork = (const struct pev_record_fork *) pos;
+ pos += sizeof(*event->record.fork);
+ break;
+
+ case PERF_RECORD_MMAP2: {
+ int slen;
+
+ event->record.mmap2 = (const struct pev_record_mmap2 *) pos;
+
+ slen = pev_strlen(event->record.mmap2->filename, end);
+ if (slen < 0)
+ return slen;
+
+ slen = (slen + 7) & ~7;
+
+ pos += sizeof(*event->record.mmap2);
+ pos += slen;
+ }
+ break;
+
+ case PERF_RECORD_AUX:
+ event->record.aux = (const struct pev_record_aux *) pos;
+ pos += sizeof(*event->record.aux);
+ break;
+
+ case PERF_RECORD_ITRACE_START:
+ event->record.itrace_start =
+ (const struct pev_record_itrace_start *) pos;
+ pos += sizeof(*event->record.itrace_start);
+ break;
+
+ case PERF_RECORD_LOST_SAMPLES:
+ event->record.lost_samples =
+ (const struct pev_record_lost_samples *) pos;
+ pos += sizeof(*event->record.lost_samples);
+ break;
+
+ case PERF_RECORD_SWITCH:
+ break;
+
+ case PERF_RECORD_SWITCH_CPU_WIDE:
+ event->record.switch_cpu_wide =
+ (const struct pev_record_switch_cpu_wide *) pos;
+ pos += sizeof(*event->record.switch_cpu_wide);
+ break;
+ }
+
+ size = pev_read_samples(event, pos, end, config);
+ if (size < 0)
+ return size;
+
+ pos += size;
+ if (pos < begin)
+ return -pte_internal;
+
+ size = (int) (pos - begin);
+ if ((uint16_t) size != header->size)
+ return -pte_nosync;
+
+ return size;
+}
+
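+/* Compute the size in bytes of the samples provided in @event. */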
+static size_t sample_size(const struct pev_event *event)
+{
+ size_t size;
+
+ if (!event)
+ return 0;
+
+ size = 0;
+
+ if (event->sample.tid) {
+ size += sizeof(*event->sample.pid);
+ size += sizeof(*event->sample.tid);
+ }
+
+ if (event->sample.time)
+ size += sizeof(*event->sample.time);
+
+ if (event->sample.id)
+ size += sizeof(*event->sample.id);
+
+ if (event->sample.stream_id)
+ size += sizeof(*event->sample.stream_id);
+
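+	/* The cpu sample is followed by a reserved 32-bit field. */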
+ if (event->sample.cpu) {
+ size += sizeof(*event->sample.cpu);
+ size += sizeof(uint32_t);
+ }
+
+ if (event->sample.identifier)
+ size += sizeof(*event->sample.identifier);
+
+ return size;
+}
+
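+/* Copy @size bytes from @object to the stream and advance the stream. */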
+static void write(uint8_t **stream, const void *object, size_t size)
+{
+ memcpy(*stream, object, size);
+ *stream += size;
+}
+
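+/* Zero @size bytes in the stream and advance the stream; used for padding. */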
+static void clear(uint8_t **stream, size_t size)
+{
+ memset(*stream, 0, size);
+ *stream += size;
+}
+
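+/* Write the samples provided in @event to the stream.
+ *
+ * The samples are written in the order given by the sample_type bits in
+ * @config; each requested sample must be provided.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */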
+static int write_samples(uint8_t **stream, const struct pev_event *event,
+ const struct pev_config *config)
+{
+ uint64_t sample_type;
+
+ if (!event || !config)
+ return -pte_internal;
+
+ if (!pev_config_has(config, sample_type))
+ return -pte_bad_config;
+
+ sample_type = config->sample_type;
+
+ if (sample_type & PERF_SAMPLE_TID) {
+ sample_type &= ~(uint64_t) PERF_SAMPLE_TID;
+
+ if (!event->sample.pid || !event->sample.tid)
+ return -pte_bad_packet;
+
+ write(stream, event->sample.pid, sizeof(*event->sample.pid));
+ write(stream, event->sample.tid, sizeof(*event->sample.tid));
+ }
+
+ if (sample_type & PERF_SAMPLE_TIME) {
+ sample_type &= ~(uint64_t) PERF_SAMPLE_TIME;
+
+ if (!event->sample.time)
+ return -pte_bad_packet;
+
+ write(stream, event->sample.time, sizeof(*event->sample.time));
+ }
+
+ if (sample_type & PERF_SAMPLE_ID) {
+ sample_type &= ~(uint64_t) PERF_SAMPLE_ID;
+
+ if (!event->sample.id)
+ return -pte_bad_packet;
+
+ write(stream, event->sample.id, sizeof(*event->sample.id));
+ }
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID) {
+ sample_type &= ~(uint64_t) PERF_SAMPLE_STREAM_ID;
+
+ if (!event->sample.stream_id)
+ return -pte_bad_packet;
+
+ write(stream, event->sample.stream_id,
+ sizeof(*event->sample.stream_id));
+ }
+
+ if (sample_type & PERF_SAMPLE_CPU) {
+ sample_type &= ~(uint64_t) PERF_SAMPLE_CPU;
+
+ if (!event->sample.cpu)
+ return -pte_bad_packet;
+
+ write(stream, event->sample.cpu, sizeof(*event->sample.cpu));
+ *stream += sizeof(uint32_t);
+ }
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER) {
+ sample_type &= ~(uint64_t) PERF_SAMPLE_IDENTIFIER;
+
+ if (!event->sample.identifier)
+ return -pte_bad_packet;
+
+ write(stream, event->sample.identifier,
+ sizeof(*event->sample.identifier));
+ }
+
+ if (sample_type)
+ return -pte_bad_packet;
+
+ return 0;
+}
+
+int pev_write(const struct pev_event *event, uint8_t *begin, uint8_t *end,
+ const struct pev_config *config)
+{
+ struct perf_event_header header;
+ uint8_t *pos;
+ size_t size;
+ int errcode;
+
+ if (!event || !begin || end < begin)
+ return -pte_internal;
+
+ pos = begin;
+ size = sizeof(header) + sample_size(event);
+ if (UINT16_MAX < size)
+ return -pte_internal;
+
+ header.type = event->type;
+ header.misc = event->misc;
+
+ switch (header.type) {
+ default:
+ return -pte_bad_opc;
+
+ case PERF_RECORD_MMAP: {
+ size_t slen, gap;
+
+ slen = strlen(event->record.mmap->filename) + 1;
+ gap = ((slen + 7) & ~7) - slen;
+
+ size += sizeof(*event->record.mmap) + slen + gap;
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.mmap, sizeof(*event->record.mmap));
+ write(&pos, event->record.mmap->filename, slen);
+ clear(&pos, gap);
+ }
+ break;
+
+ case PERF_RECORD_LOST:
+ size += sizeof(*event->record.lost);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.lost, sizeof(*event->record.lost));
+ break;
+
+ case PERF_RECORD_COMM: {
+ size_t slen, gap;
+
+ slen = strlen(event->record.comm->comm) + 1;
+ gap = ((slen + 7) & ~7) - slen;
+
+ size += sizeof(*event->record.comm) + slen + gap;
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.comm, sizeof(*event->record.comm));
+ write(&pos, event->record.comm->comm, slen);
+ clear(&pos, gap);
+ }
+ break;
+
+ case PERF_RECORD_EXIT:
+ size += sizeof(*event->record.exit);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.exit, sizeof(*event->record.exit));
+ break;
+
+ case PERF_RECORD_THROTTLE:
+ case PERF_RECORD_UNTHROTTLE:
+ size += sizeof(*event->record.throttle);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.throttle,
+ sizeof(*event->record.throttle));
+ break;
+
+ case PERF_RECORD_FORK:
+ size += sizeof(*event->record.fork);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.fork, sizeof(*event->record.fork));
+ break;
+
+ case PERF_RECORD_MMAP2: {
+ size_t slen, gap;
+
+ slen = strlen(event->record.mmap2->filename) + 1;
+ gap = ((slen + 7) & ~7) - slen;
+
+ size += sizeof(*event->record.mmap2) + slen + gap;
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.mmap2, sizeof(*event->record.mmap2));
+ write(&pos, event->record.mmap2->filename, slen);
+ clear(&pos, gap);
+ }
+ break;
+
+ case PERF_RECORD_AUX:
+ size += sizeof(*event->record.aux);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.aux, sizeof(*event->record.aux));
+ break;
+
+ case PERF_RECORD_ITRACE_START:
+ size += sizeof(*event->record.itrace_start);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.itrace_start,
+ sizeof(*event->record.itrace_start));
+ break;
+
+ case PERF_RECORD_LOST_SAMPLES:
+ size += sizeof(*event->record.lost_samples);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.lost_samples,
+ sizeof(*event->record.lost_samples));
+ break;
+
+ case PERF_RECORD_SWITCH:
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ break;
+
+ case PERF_RECORD_SWITCH_CPU_WIDE:
+ size += sizeof(*event->record.switch_cpu_wide);
+ if (UINT16_MAX < size)
+ return -pte_bad_packet;
+
+ header.size = (uint16_t) size;
+ if (end < pos + header.size)
+ return -pte_eos;
+
+ write(&pos, &header, sizeof(header));
+ write(&pos, event->record.switch_cpu_wide,
+ sizeof(*event->record.switch_cpu_wide));
+ break;
+ }
+
+ errcode = write_samples(&pos, event, config);
+ if (errcode < 0)
+ return errcode;
+
+ return (int) (pos - begin);
+}
diff --git a/pevent/test/src/ptunit-pevent.c b/pevent/test/src/ptunit-pevent.c
new file mode 100644
index 000000000000..663ed06190e5
--- /dev/null
+++ b/pevent/test/src/ptunit-pevent.c
@@ -0,0 +1,799 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pevent.h"
+
+
+/* A test fixture. */
+struct pev_fixture {
+ /* A memory buffer. */
+ uint8_t buffer[1024];
+
+ /* Two perf events:
+ *
+ * event[0] is the test setup
+ * event[1] is the event after writing and reading event[0]
+ */
+ struct pev_event event[2];
+
+ /* The perf event configuration. */
+ struct pev_config config;
+
+ /* Test samples. */
+ struct {
+ uint32_t pid, tid;
+ uint64_t time;
+ uint64_t tsc;
+ uint32_t cpu;
+ } sample;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct pev_fixture *);
+ struct ptunit_result (*fini)(struct pev_fixture *);
+};
+
+static struct ptunit_result pfix_init(struct pev_fixture *pfix)
+{
+ memset(pfix->buffer, 0xcd, sizeof(pfix->buffer));
+ memset(&pfix->sample, 0xcd, sizeof(pfix->sample));
+
+ pev_event_init(&pfix->event[0]);
+ pev_event_init(&pfix->event[1]);
+
+ pev_config_init(&pfix->config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_init_sample_time(struct pev_fixture *pfix)
+{
+ ptu_test(pfix_init, pfix);
+
+ pfix->config.sample_type |= (uint64_t) PERF_SAMPLE_TIME;
+ pfix->config.time_zero = 0xa0b00000ull;
+ pfix->config.time_shift = 4;
+ pfix->config.time_mult = 3;
+
+ pfix->sample.time = 0xa0b00cdeull;
+ pfix->event[0].sample.time = &pfix->sample.time;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_init_sample_who(struct pev_fixture *pfix)
+{
+ ptu_test(pfix_init, pfix);
+
+ pfix->config.sample_type |= (uint64_t) PERF_SAMPLE_TID;
+ pfix->config.sample_type |= (uint64_t) PERF_SAMPLE_CPU;
+
+ pfix->sample.pid = 0xa0;
+ pfix->sample.tid = 0xa1;
+ pfix->sample.cpu = 0xb;
+
+ pfix->event[0].sample.pid = &pfix->sample.pid;
+ pfix->event[0].sample.tid = &pfix->sample.tid;
+ pfix->event[0].sample.cpu = &pfix->sample.cpu;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_read_write(struct pev_fixture *pfix)
+{
+ uint8_t *begin, *end;
+ int size[2];
+
+ begin = pfix->buffer;
+ end = begin + sizeof(pfix->buffer);
+
+ size[0] = pev_write(&pfix->event[0], begin, end, &pfix->config);
+ ptu_int_gt(size[0], 0);
+
+ size[1] = pev_read(&pfix->event[1], begin, end, &pfix->config);
+ ptu_int_gt(size[1], 0);
+
+ ptu_int_eq(size[1], size[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_check_sample_time(struct pev_fixture *pfix)
+{
+ const uint64_t *time[2];
+ uint64_t tsc;
+ int errcode;
+
+ time[0] = pfix->event[0].sample.time;
+ time[1] = pfix->event[1].sample.time;
+
+ ptu_ptr(time[0]);
+ ptu_ptr(time[1]);
+
+ ptu_uint_eq(*time[1], *time[0]);
+
+ errcode = pev_time_to_tsc(&tsc, *time[0], &pfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(pfix->event[1].sample.tsc, tsc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_check_sample_tid(struct pev_fixture *pfix)
+{
+ const uint32_t *pid[2], *tid[2];
+
+ pid[0] = pfix->event[0].sample.pid;
+ pid[1] = pfix->event[1].sample.pid;
+ tid[0] = pfix->event[0].sample.tid;
+ tid[1] = pfix->event[1].sample.tid;
+
+ ptu_ptr(pid[0]);
+ ptu_ptr(pid[1]);
+ ptu_ptr(tid[0]);
+ ptu_ptr(tid[1]);
+
+ ptu_uint_eq(*pid[1], *pid[0]);
+ ptu_uint_eq(*tid[1], *tid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_check_sample_cpu(struct pev_fixture *pfix)
+{
+ const uint32_t *cpu[2];
+
+ cpu[0] = pfix->event[0].sample.cpu;
+ cpu[1] = pfix->event[1].sample.cpu;
+
+ ptu_ptr(cpu[0]);
+ ptu_ptr(cpu[1]);
+
+ ptu_uint_eq(*cpu[1], *cpu[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_check_sample(struct pev_fixture *pfix)
+{
+ if (pfix->config.sample_type & PERF_SAMPLE_TID)
+ ptu_test(pfix_check_sample_tid, pfix);
+ else {
+ ptu_null(pfix->event[1].sample.pid);
+ ptu_null(pfix->event[1].sample.tid);
+ }
+
+ if (pfix->config.sample_type & PERF_SAMPLE_TIME)
+ ptu_test(pfix_check_sample_time, pfix);
+ else
+ ptu_null(pfix->event[1].sample.time);
+
+ if (pfix->config.sample_type & PERF_SAMPLE_CPU)
+ ptu_test(pfix_check_sample_cpu, pfix);
+ else
+ ptu_null(pfix->event[1].sample.cpu);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_to_tsc_null(void)
+{
+ struct pev_config config;
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pev_time_to_tsc(NULL, 0x0ull, &config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pev_time_to_tsc(&tsc, 0x0ull, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_from_tsc_null(void)
+{
+ struct pev_config config;
+ uint64_t time;
+ int errcode;
+
+ errcode = pev_time_from_tsc(NULL, 0x0ull, &config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pev_time_from_tsc(&time, 0x0ull, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_to_tsc(void)
+{
+ struct pev_config config;
+ uint64_t tsc;
+ int errcode;
+
+ pev_config_init(&config);
+ config.time_shift = 4;
+ config.time_mult = 3;
+ config.time_zero = 0xa00b00ull;
+
+ errcode = pev_time_to_tsc(&tsc, 0xa00b43ull, &config);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(tsc, 0x165ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_from_tsc(void)
+{
+ struct pev_config config;
+ uint64_t time;
+ int errcode;
+
+ pev_config_init(&config);
+ config.time_shift = 4;
+ config.time_mult = 3;
+ config.time_zero = 0xa00b00ull;
+
+ errcode = pev_time_from_tsc(&time, 0x23bull, &config);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(time, 0xa00b6bull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_to_tsc_bad_config(void)
+{
+ struct pev_config config;
+ uint64_t tsc;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.time_mult = 1;
+
+ errcode = pev_time_to_tsc(&tsc, 0x0ull, &config);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ config.size = sizeof(config);
+ config.time_mult = 0;
+
+ errcode = pev_time_to_tsc(&tsc, 0x0ull, &config);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_from_tsc_bad_config(void)
+{
+ struct pev_config config;
+ uint64_t time;
+ int errcode;
+
+ memset(&config, 0, sizeof(config));
+ config.time_mult = 1;
+
+ errcode = pev_time_from_tsc(&time, 0x0ull, &config);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ config.size = sizeof(config);
+ config.time_mult = 0;
+
+ errcode = pev_time_from_tsc(&time, 0x0ull, &config);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_config(void)
+{
+ union {
+ struct perf_event_header header;
+ uint8_t buffer[128];
+ } input;
+ struct pev_config config;
+ struct pev_event event;
+ int errcode;
+
+ memset(input.buffer, 0, sizeof(input.buffer));
+ input.header.type = PERF_RECORD_ITRACE_START;
+ input.header.size = sizeof(event.record.itrace_start) + 0x8;
+
+ memset(&config, 0, sizeof(config));
+ config.sample_type |= (uint64_t) PERF_SAMPLE_CPU;
+
+ errcode = pev_read(&event, input.buffer,
+ input.buffer + sizeof(input.buffer), &config);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result write_bad_config(void)
+{
+ struct pev_record_itrace_start itrace_start;
+ struct pev_config config;
+ struct pev_event event;
+ uint32_t cpu;
+ uint8_t buffer[128];
+ int errcode;
+
+ memset(&itrace_start, 0, sizeof(itrace_start));
+
+ pev_event_init(&event);
+ event.type = PERF_RECORD_ITRACE_START;
+ event.record.itrace_start = &itrace_start;
+ event.sample.cpu = &cpu;
+
+ memset(&config, 0, sizeof(config));
+ config.sample_type |= (uint64_t) PERF_SAMPLE_CPU;
+
+ errcode = pev_write(&event, buffer, buffer + sizeof(buffer), &config);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bad_string(uint16_t type)
+{
+ union {
+ struct perf_event_header header;
+ uint8_t buffer[512];
+ } input;
+
+ struct pev_config config;
+ struct pev_event event;
+ int errcode;
+
+ pev_config_init(&config);
+
+ memset(input.buffer, 0xcc, sizeof(input.buffer));
+ input.header.type = type;
+ input.header.misc = 0;
+ input.header.size = 0x50;
+
+ errcode = pev_read(&event, input.buffer,
+ input.buffer + sizeof(input.buffer), &config);
+ ptu_int_eq(errcode, -pte_bad_packet);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mmap(struct pev_fixture *pfix)
+{
+ union {
+ struct pev_record_mmap record;
+ char buffer[1024];
+ } mmap;
+
+ mmap.record.pid = 0xa;
+ mmap.record.tid = 0xb;
+ mmap.record.addr = 0xa00100ull;
+ mmap.record.len = 0x110ull;
+ mmap.record.pgoff = 0xb0000ull;
+ strcpy(mmap.record.filename, "foo.so");
+
+ pfix->event[0].record.mmap = &mmap.record;
+ pfix->event[0].type = PERF_RECORD_MMAP;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.mmap);
+ ptu_uint_eq(pfix->event[1].record.mmap->pid, mmap.record.pid);
+ ptu_uint_eq(pfix->event[1].record.mmap->tid, mmap.record.tid);
+ ptu_uint_eq(pfix->event[1].record.mmap->addr, mmap.record.addr);
+ ptu_uint_eq(pfix->event[1].record.mmap->len, mmap.record.len);
+ ptu_uint_eq(pfix->event[1].record.mmap->pgoff, mmap.record.pgoff);
+ ptu_str_eq(pfix->event[1].record.mmap->filename, mmap.record.filename);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lost(struct pev_fixture *pfix)
+{
+ struct pev_record_lost lost;
+
+ lost.id = 0xa042ull;
+ lost.lost = 0xeull;
+
+ pfix->event[0].record.lost = &lost;
+ pfix->event[0].type = PERF_RECORD_LOST;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.lost);
+ ptu_uint_eq(pfix->event[1].record.lost->id, lost.id);
+ ptu_uint_eq(pfix->event[1].record.lost->lost, lost.lost);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result comm(struct pev_fixture *pfix)
+{
+ union {
+ struct pev_record_comm record;
+ char buffer[1024];
+ } comm;
+
+ comm.record.pid = 0xa;
+ comm.record.tid = 0xb;
+ strcpy(comm.record.comm, "foo -b ar");
+
+ pfix->event[0].record.comm = &comm.record;
+ pfix->event[0].type = PERF_RECORD_COMM;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.comm);
+ ptu_uint_eq(pfix->event[1].record.comm->pid, comm.record.pid);
+ ptu_uint_eq(pfix->event[1].record.comm->tid, comm.record.tid);
+ ptu_str_eq(pfix->event[1].record.comm->comm, comm.record.comm);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result record_exit(struct pev_fixture *pfix)
+{
+ struct pev_record_exit exit;
+
+ exit.pid = 0xa;
+ exit.ppid = 0xaa;
+ exit.tid = 0xb;
+ exit.ptid = 0xab;
+ exit.time = 0xabcdefull;
+
+ pfix->event[0].record.exit = &exit;
+ pfix->event[0].type = PERF_RECORD_EXIT;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.exit);
+ ptu_uint_eq(pfix->event[1].record.exit->pid, exit.pid);
+ ptu_uint_eq(pfix->event[1].record.exit->ppid, exit.ppid);
+ ptu_uint_eq(pfix->event[1].record.exit->tid, exit.tid);
+ ptu_uint_eq(pfix->event[1].record.exit->ptid, exit.ptid);
+ ptu_uint_eq(pfix->event[1].record.exit->time, exit.time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result throttle(struct pev_fixture *pfix)
+{
+ struct pev_record_throttle throttle;
+
+ throttle.time = 0xabcdeull;
+ throttle.id = 0xa042ull;
+ throttle.stream_id = 0xb00ull;
+
+ pfix->event[0].record.throttle = &throttle;
+ pfix->event[0].type = PERF_RECORD_THROTTLE;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.throttle);
+ ptu_uint_eq(pfix->event[1].record.throttle->time, throttle.time);
+ ptu_uint_eq(pfix->event[1].record.throttle->id, throttle.id);
+ ptu_uint_eq(pfix->event[1].record.throttle->stream_id,
+ throttle.stream_id);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unthrottle(struct pev_fixture *pfix)
+{
+ struct pev_record_throttle throttle;
+
+ throttle.time = 0xc00042ull;
+ throttle.id = 0x23ull;
+ throttle.stream_id = 0x0ull;
+
+ pfix->event[0].record.throttle = &throttle;
+ pfix->event[0].type = PERF_RECORD_UNTHROTTLE;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.throttle);
+ ptu_uint_eq(pfix->event[1].record.throttle->time, throttle.time);
+ ptu_uint_eq(pfix->event[1].record.throttle->id, throttle.id);
+ ptu_uint_eq(pfix->event[1].record.throttle->stream_id,
+ throttle.stream_id);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fork(struct pev_fixture *pfix)
+{
+ struct pev_record_fork fork;
+
+ fork.pid = 0xa;
+ fork.ppid = 0xaa;
+ fork.tid = 0xb;
+ fork.ptid = 0xab;
+ fork.time = 0xabcdefull;
+
+ pfix->event[0].record.fork = &fork;
+ pfix->event[0].type = PERF_RECORD_FORK;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.fork);
+ ptu_uint_eq(pfix->event[1].record.fork->pid, fork.pid);
+ ptu_uint_eq(pfix->event[1].record.fork->ppid, fork.ppid);
+ ptu_uint_eq(pfix->event[1].record.fork->tid, fork.tid);
+ ptu_uint_eq(pfix->event[1].record.fork->ptid, fork.ptid);
+ ptu_uint_eq(pfix->event[1].record.fork->time, fork.time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mmap2(struct pev_fixture *pfix)
+{
+ union {
+ struct pev_record_mmap2 record;
+ char buffer[1024];
+ } mmap2;
+
+ mmap2.record.pid = 0xa;
+ mmap2.record.tid = 0xb;
+ mmap2.record.addr = 0xa00100ull;
+ mmap2.record.len = 0x110ull;
+ mmap2.record.pgoff = 0xb0000ull;
+ mmap2.record.maj = 7;
+ mmap2.record.min = 2;
+ mmap2.record.ino = 0x8080ull;
+ mmap2.record.ino_generation = 0x4ull;
+ mmap2.record.prot = 0x755;
+ mmap2.record.flags = 0;
+ strcpy(mmap2.record.filename, "foo.so");
+
+ pfix->event[0].record.mmap2 = &mmap2.record;
+ pfix->event[0].type = PERF_RECORD_MMAP2;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.mmap2);
+ ptu_uint_eq(pfix->event[1].record.mmap2->pid, mmap2.record.pid);
+ ptu_uint_eq(pfix->event[1].record.mmap2->tid, mmap2.record.tid);
+ ptu_uint_eq(pfix->event[1].record.mmap2->addr, mmap2.record.addr);
+ ptu_uint_eq(pfix->event[1].record.mmap2->len, mmap2.record.len);
+ ptu_uint_eq(pfix->event[1].record.mmap2->pgoff, mmap2.record.pgoff);
+ ptu_uint_eq(pfix->event[1].record.mmap2->maj, mmap2.record.maj);
+ ptu_uint_eq(pfix->event[1].record.mmap2->min, mmap2.record.min);
+ ptu_uint_eq(pfix->event[1].record.mmap2->ino, mmap2.record.ino);
+ ptu_uint_eq(pfix->event[1].record.mmap2->ino_generation,
+ mmap2.record.ino_generation);
+ ptu_uint_eq(pfix->event[1].record.mmap2->prot, mmap2.record.prot);
+ ptu_uint_eq(pfix->event[1].record.mmap2->flags, mmap2.record.flags);
+ ptu_str_eq(pfix->event[1].record.mmap2->filename,
+ mmap2.record.filename);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result aux(struct pev_fixture *pfix)
+{
+ struct pev_record_aux aux;
+
+ aux.aux_offset = 0xc00042ull;
+ aux.aux_size = 0x23ull;
+ aux.flags = 0x0ull;
+
+ pfix->event[0].record.aux = &aux;
+ pfix->event[0].type = PERF_RECORD_AUX;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.aux);
+ ptu_uint_eq(pfix->event[1].record.aux->aux_offset, aux.aux_offset);
+ ptu_uint_eq(pfix->event[1].record.aux->aux_size, aux.aux_size);
+ ptu_uint_eq(pfix->event[1].record.aux->flags, aux.flags);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result itrace_start(struct pev_fixture *pfix)
+{
+ struct pev_record_itrace_start itrace_start;
+
+ itrace_start.pid = 0xa;
+ itrace_start.tid = 0xb;
+
+ pfix->event[0].record.itrace_start = &itrace_start;
+ pfix->event[0].type = PERF_RECORD_ITRACE_START;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.itrace_start);
+ ptu_uint_eq(pfix->event[1].record.itrace_start->pid, itrace_start.pid);
+ ptu_uint_eq(pfix->event[1].record.itrace_start->tid, itrace_start.tid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lost_samples(struct pev_fixture *pfix)
+{
+ struct pev_record_lost_samples lost_samples;
+
+ lost_samples.lost = 0xc00042ull;
+
+ pfix->event[0].record.lost_samples = &lost_samples;
+ pfix->event[0].type = PERF_RECORD_LOST_SAMPLES;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_ptr(pfix->event[1].record.lost_samples);
+ ptu_uint_eq(pfix->event[1].record.lost_samples->lost,
+ lost_samples.lost);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result switch_task(struct pev_fixture *pfix,
+ int switch_out)
+{
+ pfix->event[0].type = PERF_RECORD_SWITCH;
+ pfix->event[0].misc = switch_out ? PERF_RECORD_MISC_SWITCH_OUT : 0;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_int_eq(pfix->event[1].misc, pfix->event[0].misc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result switch_cpu_wide(struct pev_fixture *pfix,
+ int switch_out)
+{
+ struct pev_record_switch_cpu_wide switch_cpu_wide;
+
+ switch_cpu_wide.next_prev_pid = 0xa;
+ switch_cpu_wide.next_prev_tid = 0xb;
+
+ pfix->event[0].record.switch_cpu_wide = &switch_cpu_wide;
+ pfix->event[0].type = PERF_RECORD_SWITCH_CPU_WIDE;
+ pfix->event[0].misc = switch_out ? PERF_RECORD_MISC_SWITCH_OUT : 0;
+
+ ptu_test(pfix_read_write, pfix);
+ ptu_test(pfix_check_sample, pfix);
+
+ ptu_int_eq(pfix->event[1].type, pfix->event[0].type);
+ ptu_int_eq(pfix->event[1].misc, pfix->event[0].misc);
+ ptu_ptr(pfix->event[1].record.switch_cpu_wide);
+ ptu_uint_eq(pfix->event[1].record.switch_cpu_wide->next_prev_pid,
+ switch_cpu_wide.next_prev_pid);
+ ptu_uint_eq(pfix->event[1].record.switch_cpu_wide->next_prev_tid,
+ switch_cpu_wide.next_prev_tid);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct pev_fixture pfix, pfix_time, pfix_who;
+ struct ptunit_suite suite;
+
+ pfix.init = pfix_init;
+ pfix.fini = NULL;
+
+ pfix_time.init = pfix_init_sample_time;
+ pfix_time.fini = NULL;
+
+ pfix_who.init = pfix_init_sample_who;
+ pfix_who.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, time_to_tsc_null);
+ ptu_run(suite, time_from_tsc_null);
+
+ ptu_run(suite, time_to_tsc);
+ ptu_run(suite, time_from_tsc);
+
+ ptu_run(suite, time_to_tsc_bad_config);
+ ptu_run(suite, time_from_tsc_bad_config);
+ ptu_run(suite, read_bad_config);
+ ptu_run(suite, write_bad_config);
+
+ ptu_run_p(suite, bad_string, PERF_RECORD_MMAP);
+ ptu_run_p(suite, bad_string, PERF_RECORD_COMM);
+ ptu_run_p(suite, bad_string, PERF_RECORD_MMAP2);
+
+ ptu_run_f(suite, mmap, pfix);
+ ptu_run_f(suite, lost, pfix);
+ ptu_run_f(suite, comm, pfix);
+ ptu_run_f(suite, record_exit, pfix);
+ ptu_run_f(suite, throttle, pfix);
+ ptu_run_f(suite, unthrottle, pfix);
+ ptu_run_f(suite, fork, pfix);
+ ptu_run_f(suite, mmap2, pfix);
+ ptu_run_f(suite, aux, pfix);
+ ptu_run_f(suite, itrace_start, pfix);
+ ptu_run_f(suite, lost_samples, pfix);
+ ptu_run_fp(suite, switch_task, pfix, 0);
+ ptu_run_fp(suite, switch_task, pfix, 1);
+ ptu_run_fp(suite, switch_cpu_wide, pfix, 0);
+ ptu_run_fp(suite, switch_cpu_wide, pfix, 1);
+
+ ptu_run_f(suite, mmap, pfix_time);
+ ptu_run_f(suite, lost, pfix_time);
+ ptu_run_f(suite, comm, pfix_time);
+ ptu_run_f(suite, record_exit, pfix_time);
+ ptu_run_f(suite, throttle, pfix_time);
+ ptu_run_f(suite, unthrottle, pfix_time);
+ ptu_run_f(suite, fork, pfix_time);
+ ptu_run_f(suite, mmap2, pfix_time);
+ ptu_run_f(suite, aux, pfix_time);
+ ptu_run_f(suite, itrace_start, pfix_time);
+ ptu_run_f(suite, lost_samples, pfix_time);
+ ptu_run_fp(suite, switch_task, pfix_time, 0);
+ ptu_run_fp(suite, switch_task, pfix_time, 1);
+ ptu_run_fp(suite, switch_cpu_wide, pfix_time, 0);
+ ptu_run_fp(suite, switch_cpu_wide, pfix_time, 1);
+
+ ptu_run_f(suite, mmap, pfix_who);
+ ptu_run_f(suite, lost, pfix_who);
+ ptu_run_f(suite, comm, pfix_who);
+ ptu_run_f(suite, record_exit, pfix_who);
+ ptu_run_f(suite, throttle, pfix_who);
+ ptu_run_f(suite, unthrottle, pfix_who);
+ ptu_run_f(suite, fork, pfix_who);
+ ptu_run_f(suite, mmap2, pfix_who);
+ ptu_run_f(suite, aux, pfix_who);
+ ptu_run_f(suite, itrace_start, pfix_who);
+ ptu_run_f(suite, lost_samples, pfix_who);
+ ptu_run_fp(suite, switch_task, pfix_who, 0);
+ ptu_run_fp(suite, switch_task, pfix_who, 1);
+ ptu_run_fp(suite, switch_cpu_wide, pfix_who, 0);
+ ptu_run_fp(suite, switch_cpu_wide, pfix_who, 1);
+
+ return ptunit_report(&suite);
+}
diff --git a/ptdump/CMakeLists.txt b/ptdump/CMakeLists.txt
new file mode 100644
index 000000000000..9d5c9264726d
--- /dev/null
+++ b/ptdump/CMakeLists.txt
@@ -0,0 +1,54 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+include_directories(
+ include
+ ../libipt/internal/include
+)
+
+set(PTDUMP_FILES
+ src/ptdump.c
+ ../libipt/src/pt_last_ip.c
+ ../libipt/src/pt_cpu.c
+ ../libipt/src/pt_time.c
+)
+
+if (CMAKE_HOST_UNIX)
+ set(PTDUMP_FILES ${PTDUMP_FILES} ../libipt/src/posix/pt_cpuid.c)
+endif (CMAKE_HOST_UNIX)
+
+if (CMAKE_HOST_WIN32)
+ set(PTDUMP_FILES ${PTDUMP_FILES} ../libipt/src/windows/pt_cpuid.c)
+endif (CMAKE_HOST_WIN32)
+
+add_executable(ptdump
+ ${PTDUMP_FILES}
+)
+
+target_link_libraries(ptdump libipt)
+if (SIDEBAND)
+ target_link_libraries(ptdump libipt-sb)
+endif (SIDEBAND)
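+
+# Note: the SIDEBAND switch (and the FEATURE_* defines used by src/ptdump.c)
+# is expected to come from the top-level CMakeLists.txt, e.g. configured with
+# "cmake -DSIDEBAND=ON"; this file only consumes it.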
diff --git a/ptdump/src/ptdump.c b/ptdump/src/ptdump.c
new file mode 100644
index 000000000000..3e27df722f01
--- /dev/null
+++ b/ptdump/src/ptdump.c
@@ -0,0 +1,1951 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpu.h"
+#include "pt_last_ip.h"
+#include "pt_time.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#if defined(FEATURE_SIDEBAND)
+# include "libipt-sb.h"
+#endif
+
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <limits.h>
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+# define snprintf _snprintf_c
+#endif
+
+
+struct ptdump_options {
+#if defined(FEATURE_SIDEBAND)
+ /* Sideband dump flags. */
+ uint32_t sb_dump_flags;
+#endif
+ /* Show the current offset in the trace stream. */
+ uint32_t show_offset:1;
+
+ /* Show raw packet bytes. */
+ uint32_t show_raw_bytes:1;
+
+ /* Show last IP for packets with IP payloads. */
+ uint32_t show_last_ip:1;
+
+ /* Show the execution mode on mode.exec. */
+ uint32_t show_exec_mode:1;
+
+ /* Keep track of time. */
+ uint32_t track_time:1;
+
+ /* Show the estimated TSC for timing related packets. */
+ uint32_t show_time:1;
+
+ /* Show time calibration. */
+ uint32_t show_tcal:1;
+
+ /* Show timing information as delta to the previous value. */
+ uint32_t show_time_as_delta:1;
+
+ /* Quiet mode: Don't print anything but errors. */
+ uint32_t quiet:1;
+
+ /* Don't show PAD packets. */
+ uint32_t no_pad:1;
+
+ /* Do not try to sync the decoder. */
+ uint32_t no_sync:1;
+
+ /* Do not calibrate timing. */
+ uint32_t no_tcal:1;
+
+ /* Do not expect wall-clock time. */
+ uint32_t no_wall_clock:1;
+
+ /* Don't show timing packets. */
+ uint32_t no_timing:1;
+
+ /* Don't show CYC packets and ignore them when tracking time. */
+ uint32_t no_cyc:1;
+
+#if defined(FEATURE_SIDEBAND)
+ /* Print sideband warnings. */
+ uint32_t print_sb_warnings:1;
+#endif
+};
+
+struct ptdump_buffer {
+ /* The trace offset. */
+ char offset[17];
+
+ /* The raw packet bytes. */
+ char raw[33];
+
+ /* The packet opcode. */
+ char opcode[10];
+
+ union {
+ /* The standard packet payload. */
+ char standard[25];
+
+ /* An extended packet payload. */
+ char extended[48];
+ } payload;
+
+ /* The tracking information. */
+ struct {
+ /* The tracking identifier. */
+ char id[5];
+
+ /* The tracking information. */
+ char payload[17];
+ } tracking;
+
+ /* A flag telling whether an extended payload is used. */
+ uint32_t use_ext_payload:1;
+
+ /* A flag telling whether to skip printing this buffer. */
+ uint32_t skip:1;
+
+ /* A flag telling whether to skip printing the time. */
+ uint32_t skip_time:1;
+
+ /* A flag telling whether to skip printing the calibration. */
+ uint32_t skip_tcal:1;
+};
+
+struct ptdump_tracking {
+#if defined(FEATURE_SIDEBAND)
+ /* The sideband session. */
+ struct pt_sb_session *session;
+#endif
+
+ /* Track last-ip. */
+ struct pt_last_ip last_ip;
+
+ /* Track time calibration. */
+ struct pt_time_cal tcal;
+
+ /* Track time. */
+ struct pt_time time;
+
+ /* The last estimated TSC. */
+ uint64_t tsc;
+
+ /* The last calibration value. */
+ uint64_t fcr;
+
+ /* Header vs. normal decode. Set if decoding PSB+. */
+ uint32_t in_header:1;
+};
+
+static int usage(const char *name)
+{
+ fprintf(stderr,
+ "%s: [<options>] <ptfile>. Use --help or -h for help.\n",
+ name);
+ return -1;
+}
+
+static int no_file_error(const char *name)
+{
+ fprintf(stderr, "%s: No processor trace file specified.\n", name);
+ return -1;
+}
+
+static int unknown_option_error(const char *arg, const char *name)
+{
+ fprintf(stderr, "%s: unknown option: %s.\n", name, arg);
+ return -1;
+}
+
+static int help(const char *name)
+{
+ printf("usage: %s [<options>] <ptfile>[:<from>[-<to>]\n\n", name);
+ printf("options:\n");
+ printf(" --help|-h this text.\n");
+ printf(" --version display version information and exit.\n");
+ printf(" --no-sync don't try to sync to the first PSB, assume a valid\n");
+ printf(" sync point at the beginning of the trace.\n");
+ printf(" --quiet don't print anything but errors.\n");
+ printf(" --no-pad don't show PAD packets.\n");
+ printf(" --no-timing don't show timing packets.\n");
+ printf(" --no-cyc don't show CYC packets and ignore them when tracking time.\n");
+ printf(" --no-offset don't show the offset as the first column.\n");
+ printf(" --raw show raw packet bytes.\n");
+ printf(" --lastip show last IP updates on packets with IP payloads.\n");
+ printf(" --exec-mode show the current execution mode on mode.exec packets.\n");
+ printf(" --time show the estimated TSC on timing packets.\n");
+ printf(" --tcal show time calibration information.\n");
+ printf(" --time-delta show timing information as delta.\n");
+ printf(" --no-tcal skip timing calibration.\n");
+ printf(" this will result in errors when CYC packets are encountered.\n");
+ printf(" --no-wall-clock suppress the no-time error and print relative time.\n");
+#if defined(FEATURE_SIDEBAND)
+ printf(" --sb:compact | --sb show sideband records in compact format.\n");
+ printf(" --sb:verbose show sideband records in verbose format.\n");
+ printf(" --sb:filename show the filename on sideband records.\n");
+ printf(" --sb:offset show the offset on sideband records.\n");
+ printf(" --sb:time show the time on sideband records.\n");
+ printf(" --sb:warn show sideband warnings.\n");
+#if defined(FEATURE_PEVENT)
+ printf(" --pevent[:primary/:secondary] <file>[:<from>[-<to>]]\n");
+ printf(" load a perf_event sideband stream from <file>.\n");
+ printf(" an optional offset or range can be given.\n");
+ printf(" --pevent:sample-type <val> set perf_event_attr.sample_type to <val> (default: 0).\n");
+ printf(" --pevent:time-zero <val> set perf_event_mmap_page.time_zero to <val> (default: 0).\n");
+ printf(" --pevent:time-shift <val> set perf_event_mmap_page.time_shift to <val> (default: 0).\n");
+ printf(" --pevent:time-mult <val> set perf_event_mmap_page.time_mult to <val> (default: 1).\n");
+ printf(" --pevent:tsc-offset <val> show perf events <val> ticks earlier.\n");
+ printf(" --pevent:kernel-start <val> the start address of the kernel.\n");
+ printf(" --pevent:sysroot <path> ignored.\n");
+ printf(" --pevent:kcore <file> ignored.\n");
+ printf(" --pevent:vdso-x64 <file> ignored.\n");
+ printf(" --pevent:vdso-x32 <file> ignored.\n");
+ printf(" --pevent:vdso-ia32 <file> ignored.\n");
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+ printf(" --cpu none|auto|f/m[/s] set cpu to the given value and decode according to:\n");
+ printf(" none spec (default)\n");
+ printf(" auto current cpu\n");
+ printf(" f/m[/s] family/model[/stepping]\n");
+ printf(" --mtc-freq <n> set the MTC frequency (IA32_RTIT_CTL[17:14]) to <n>.\n");
+ printf(" --nom-freq <n> set the nominal frequency (MSR_PLATFORM_INFO[15:8]) to <n>.\n");
+ printf(" --cpuid-0x15.eax set the value of cpuid[0x15].eax.\n");
+ printf(" --cpuid-0x15.ebx set the value of cpuid[0x15].ebx.\n");
+ printf(" <ptfile>[:<from>[-<to>]] load the processor trace data from <ptfile>;\n");
+
+ return 1;
+}
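+
+/* Example invocation (illustrative; the trace file name is hypothetical):
+ *
+ *   ptdump --raw --time --cpu 6/92 trace.pt:0x1000-0x2000
+ *
+ * dumps the packets in the 0x1000-0x2000 range of trace.pt, showing the raw
+ * packet bytes and the estimated TSC, decoding for family 6, model 92.
+ */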
+
+static int version(const char *name)
+{
+ struct pt_version v = pt_library_version();
+
+ printf("%s-%d.%d.%d%s / libipt-%" PRIu8 ".%" PRIu8 ".%" PRIu32 "%s\n",
+ name, PT_VERSION_MAJOR, PT_VERSION_MINOR, PT_VERSION_BUILD,
+ PT_VERSION_EXT, v.major, v.minor, v.build, v.ext);
+
+ return 1;
+}
+
+static int parse_range(const char *arg, uint64_t *begin, uint64_t *end)
+{
+ char *rest;
+
+ if (!arg || !*arg)
+ return 0;
+
+ errno = 0;
+ *begin = strtoull(arg, &rest, 0);
+ if (errno)
+ return -1;
+
+ if (!*rest)
+ return 1;
+
+ if (*rest != '-')
+ return -1;
+
+ *end = strtoull(rest+1, &rest, 0);
+ if (errno || *rest)
+ return -1;
+
+ return 2;
+}
+
+/* Preprocess a filename argument.
+ *
+ * A filename may optionally be followed by a file offset or a file range
+ * argument separated by ':'. Split the original argument into the filename
+ * part and the offset/range part.
+ *
+ * If no end address is specified, set @size to zero.
+ * If no offset is specified, set @offset to zero.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int preprocess_filename(char *filename, uint64_t *offset, uint64_t *size)
+{
+ uint64_t begin, end;
+ char *range;
+ int parts;
+
+ if (!filename || !offset || !size)
+ return -pte_internal;
+
+ /* Search from the end as the filename may also contain ':'. */
+ range = strrchr(filename, ':');
+ if (!range) {
+ *offset = 0ull;
+ *size = 0ull;
+
+ return 0;
+ }
+
+ /* Let's try to parse an optional range suffix.
+ *
+ * If we can, remove it from the filename argument.
+ * If we cannot, assume that the ':' is part of the filename, e.g. a
+ * drive letter on Windows.
+ */
+ parts = parse_range(range + 1, &begin, &end);
+ if (parts <= 0) {
+ *offset = 0ull;
+ *size = 0ull;
+
+ return 0;
+ }
+
+ if (parts == 1) {
+ *offset = begin;
+ *size = 0ull;
+
+ *range = 0;
+
+ return 0;
+ }
+
+ if (parts == 2) {
+ if (end <= begin)
+ return -pte_invalid;
+
+ *offset = begin;
+ *size = end - begin;
+
+ *range = 0;
+
+ return 0;
+ }
+
+ return -pte_internal;
+}
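+
+/* Illustrative examples (hypothetical file names):
+ *
+ *   "trace.pt"             -> offset 0,     size 0 (use the whole file)
+ *   "trace.pt:0x100"       -> offset 0x100, size 0
+ *   "trace.pt:0x100-0x200" -> offset 0x100, size 0x100
+ *   "c:\trace.pt"          -> offset 0,     size 0 (the ':' is treated as
+ *                             part of the filename, e.g. a drive letter)
+ *
+ * When a range suffix parses, it is stripped from the filename argument.
+ */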
+
+static int load_file(uint8_t **buffer, size_t *psize, const char *filename,
+ uint64_t offset, uint64_t size, const char *prog)
+{
+ uint8_t *content;
+ size_t read;
+ FILE *file;
+ long fsize, begin, end;
+ int errcode;
+
+ if (!buffer || !psize || !filename || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "");
+ return -1;
+ }
+
+ errno = 0;
+ file = fopen(filename, "rb");
+ if (!file) {
+ fprintf(stderr, "%s: failed to open %s: %d.\n",
+ prog, filename, errno);
+ return -1;
+ }
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode) {
+ fprintf(stderr, "%s: failed to determine size of %s: %d.\n",
+ prog, filename, errno);
+ goto err_file;
+ }
+
+ fsize = ftell(file);
+ if (fsize < 0) {
+ fprintf(stderr, "%s: failed to determine size of %s: %d.\n",
+ prog, filename, errno);
+ goto err_file;
+ }
+
+ begin = (long) offset;
+ if (((uint64_t) begin != offset) || (fsize <= begin)) {
+ fprintf(stderr,
+ "%s: bad offset 0x%" PRIx64 " into %s.\n",
+ prog, offset, filename);
+ goto err_file;
+ }
+
+ end = fsize;
+ if (size) {
+ uint64_t range_end;
+
+ range_end = offset + size;
+ if ((uint64_t) end < range_end) {
+ fprintf(stderr,
+ "%s: bad range 0x%" PRIx64 " in %s.\n",
+ prog, range_end, filename);
+ goto err_file;
+ }
+
+ end = (long) range_end;
+ }
+
+ fsize = end - begin;
+
+ content = malloc(fsize);
+ if (!content) {
+ fprintf(stderr, "%s: failed to allocated memory %s.\n",
+ prog, filename);
+ goto err_file;
+ }
+
+ errcode = fseek(file, begin, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr, "%s: failed to load %s: %d.\n",
+ prog, filename, errno);
+ goto err_content;
+ }
+
+ read = fread(content, fsize, 1, file);
+ if (read != 1) {
+ fprintf(stderr, "%s: failed to load %s: %d.\n",
+ prog, filename, errno);
+ goto err_content;
+ }
+
+ fclose(file);
+
+ *buffer = content;
+ *psize = fsize;
+
+ return 0;
+
+err_content:
+ free(content);
+
+err_file:
+ fclose(file);
+ return -1;
+}
+
+static int load_pt(struct pt_config *config, const char *filename,
+ uint64_t foffset, uint64_t fsize, const char *prog)
+{
+ uint8_t *buffer;
+ size_t size;
+ int errcode;
+
+ errcode = load_file(&buffer, &size, filename, foffset, fsize, prog);
+ if (errcode < 0)
+ return errcode;
+
+ config->begin = buffer;
+ config->end = buffer + size;
+
+ return 0;
+}
+
+static int diag(const char *errstr, uint64_t offset, int errcode)
+{
+ if (errcode)
+ printf("[%" PRIx64 ": %s: %s]\n", offset, errstr,
+ pt_errstr(pt_errcode(errcode)));
+ else
+ printf("[%" PRIx64 ": %s]\n", offset, errstr);
+
+ return errcode;
+}
+
+static void ptdump_tracking_init(struct ptdump_tracking *tracking)
+{
+ if (!tracking)
+ return;
+
+ pt_last_ip_init(&tracking->last_ip);
+ pt_tcal_init(&tracking->tcal);
+ pt_time_init(&tracking->time);
+
+#if defined(FEATURE_SIDEBAND)
+ tracking->session = NULL;
+#endif
+ tracking->tsc = 0ull;
+ tracking->fcr = 0ull;
+ tracking->in_header = 0;
+}
+
+static void ptdump_tracking_reset(struct ptdump_tracking *tracking)
+{
+ if (!tracking)
+ return;
+
+ pt_last_ip_init(&tracking->last_ip);
+ pt_tcal_init(&tracking->tcal);
+ pt_time_init(&tracking->time);
+
+ tracking->tsc = 0ull;
+ tracking->fcr = 0ull;
+ tracking->in_header = 0;
+}
+
+static void ptdump_tracking_fini(struct ptdump_tracking *tracking)
+{
+ if (!tracking)
+ return;
+
+#if defined(FEATURE_SIDEBAND)
+ pt_sb_free(tracking->session);
+#endif
+}
+
+#define print_field(field, ...) \
+ do { \
+ /* Avoid partial overwrites. */ \
+ memset(field, 0, sizeof(field)); \
+ snprintf(field, sizeof(field), __VA_ARGS__); \
+ } while (0)
+
+
+static int print_buffer(struct ptdump_buffer *buffer, uint64_t offset,
+ const struct ptdump_options *options)
+{
+ const char *sep;
+
+ if (!buffer)
+ return diag("error printing buffer", offset, -pte_internal);
+
+ if (buffer->skip || options->quiet)
+ return 0;
+
+ /* Make sure the first column starts at the beginning of the line - no
+ * matter what column is first.
+ */
+ sep = "";
+
+ if (options->show_offset) {
+ printf("%-*s", (int) sizeof(buffer->offset), buffer->offset);
+ sep = " ";
+ }
+
+ if (buffer->raw[0]) {
+ printf("%s%-*s", sep, (int) sizeof(buffer->raw), buffer->raw);
+ sep = " ";
+ }
+
+ if (buffer->payload.standard[0])
+ printf("%s%-*s", sep, (int) sizeof(buffer->opcode),
+ buffer->opcode);
+ else
+ printf("%s%s", sep, buffer->opcode);
+
+ /* We printed at least one column. From this point on, we don't need
+ * the separator any longer.
+ */
+
+ if (buffer->use_ext_payload)
+ printf(" %s", buffer->payload.extended);
+ else if (buffer->tracking.id[0]) {
+ printf(" %-*s", (int) sizeof(buffer->payload.standard),
+ buffer->payload.standard);
+
+ printf(" %-*s", (int) sizeof(buffer->tracking.id),
+ buffer->tracking.id);
+ printf("%s", buffer->tracking.payload);
+ } else if (buffer->payload.standard[0])
+ printf(" %s", buffer->payload.standard);
+
+ printf("\n");
+ return 0;
+}
+
+static int print_raw(struct ptdump_buffer *buffer, uint64_t offset,
+ const struct pt_packet *packet,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ char *bbegin, *bend;
+
+ if (!buffer || !packet)
+ return diag("error printing packet", offset, -pte_internal);
+
+ begin = config->begin + offset;
+ end = begin + packet->size;
+
+ if (config->end < end)
+ return diag("bad packet size", offset, -pte_bad_packet);
+
+ bbegin = buffer->raw;
+ bend = bbegin + sizeof(buffer->raw);
+
+ for (; begin < end; ++begin) {
+ char *pos;
+
+ pos = bbegin;
+ bbegin += 2;
+
+ if (bend <= bbegin)
+ return diag("truncating raw packet", offset, 0);
+
+ sprintf(pos, "%02x", *begin);
+ }
+
+ return 0;
+}
+
+static int track_last_ip(struct ptdump_buffer *buffer,
+ struct pt_last_ip *last_ip, uint64_t offset,
+ const struct pt_packet_ip *packet,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ uint64_t ip;
+ int errcode;
+
+ if (!buffer || !options)
+ return diag("error tracking last-ip", offset, -pte_internal);
+
+ print_field(buffer->tracking.id, "ip");
+
+ errcode = pt_last_ip_update_ip(last_ip, packet, config);
+ if (errcode < 0) {
+ print_field(buffer->tracking.payload, "<unavailable>");
+
+ return diag("error tracking last-ip", offset, errcode);
+ }
+
+ errcode = pt_last_ip_query(&ip, last_ip);
+ if (errcode < 0) {
+ if (errcode == -pte_ip_suppressed)
+ print_field(buffer->tracking.payload, "<suppressed>");
+ else {
+ print_field(buffer->tracking.payload, "<unavailable>");
+
+ return diag("error tracking last-ip", offset, errcode);
+ }
+ } else
+ print_field(buffer->tracking.payload, "%016" PRIx64, ip);
+
+ return 0;
+}
+
+
+static int print_time(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct ptdump_options *options)
+{
+ uint64_t tsc;
+ int errcode;
+
+ if (!tracking || !options)
+ return diag("error printing time", offset, -pte_internal);
+
+ print_field(buffer->tracking.id, "tsc");
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tracking->time);
+ if (errcode < 0) {
+ switch (-errcode) {
+ case pte_no_time:
+ if (options->no_wall_clock)
+ break;
+
+ fallthrough;
+ default:
+ diag("error printing time", offset, errcode);
+ print_field(buffer->tracking.payload, "<unavailable>");
+ return errcode;
+ }
+ }
+
+ if (options->show_time_as_delta) {
+ uint64_t old_tsc;
+
+ old_tsc = tracking->tsc;
+ if (old_tsc <= tsc)
+ print_field(buffer->tracking.payload, "+%" PRIx64,
+ tsc - old_tsc);
+ else
+ print_field(buffer->tracking.payload, "-%" PRIx64,
+ old_tsc - tsc);
+
+ tracking->tsc = tsc;
+ } else
+ print_field(buffer->tracking.payload, "%016" PRIx64, tsc);
+
+ return 0;
+}
+
+static int print_tcal(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct ptdump_options *options)
+{
+ uint64_t fcr;
+ double dfcr;
+ int errcode;
+
+ if (!tracking || !options)
+ return diag("error printing time", offset, -pte_internal);
+
+ print_field(buffer->tracking.id, "fcr");
+
+ errcode = pt_tcal_fcr(&fcr, &tracking->tcal);
+ if (errcode < 0) {
+ print_field(buffer->tracking.payload, "<unavailable>");
+ return diag("error printing time", offset, errcode);
+ }
+
+ /* We print fcr as double to account for the shift. */
+ dfcr = (double) fcr;
+ dfcr /= (double) (1ull << pt_tcal_fcr_shr);
+
+ if (options->show_time_as_delta) {
+ uint64_t old_fcr;
+ double dold_fcr;
+
+ old_fcr = tracking->fcr;
+
+ /* We print fcr as double to account for the shift. */
+ dold_fcr = (double) old_fcr;
+ dold_fcr /= (double) (1ull << pt_tcal_fcr_shr);
+
+ if (old_fcr <= fcr)
+ print_field(buffer->tracking.payload, "+%.3f",
+ dfcr - dold_fcr);
+ else
+ print_field(buffer->tracking.payload, "-%.3f",
+ dold_fcr - dfcr);
+
+ tracking->fcr = fcr;
+ } else
+ print_field(buffer->tracking.payload, "%.3f", dfcr);
+
+ return 0;
+}
+
+static int sb_track_time(struct ptdump_tracking *tracking,
+ const struct ptdump_options *options, uint64_t offset)
+{
+ uint64_t tsc;
+ int errcode;
+
+ if (!tracking || !options)
+ return diag("time tracking error", offset, -pte_internal);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tracking->time);
+ if ((errcode < 0) && (errcode != -pte_no_time))
+ return diag("time tracking error", offset, errcode);
+
+#if defined(FEATURE_SIDEBAND)
+ errcode = pt_sb_dump(tracking->session, stdout, options->sb_dump_flags,
+ tsc);
+ if (errcode < 0)
+ return diag("sideband dump error", offset, errcode);
+#endif
+ return 0;
+}
+
+static int track_time(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct ptdump_options *options)
+{
+ if (!tracking || !options)
+ return diag("error tracking time", offset, -pte_internal);
+
+ if (options->show_tcal && !buffer->skip_tcal)
+ print_tcal(buffer, tracking, offset, options);
+
+ if (options->show_time && !buffer->skip_time)
+ print_time(buffer, tracking, offset, options);
+
+ return sb_track_time(tracking, options, offset);
+}
+
+static int track_tsc(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct pt_packet_tsc *packet,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!buffer || !tracking || !options)
+ return diag("error tracking time", offset, -pte_internal);
+
+ if (!options->no_tcal) {
+ errcode = tracking->in_header ?
+ pt_tcal_header_tsc(&tracking->tcal, packet, config) :
+ pt_tcal_update_tsc(&tracking->tcal, packet, config);
+ if (errcode < 0)
+ diag("error calibrating time", offset, errcode);
+ }
+
+ errcode = pt_time_update_tsc(&tracking->time, packet, config);
+ if (errcode < 0)
+ diag("error updating time", offset, errcode);
+
+ return track_time(buffer, tracking, offset, options);
+}
+
+static int track_cbr(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct pt_packet_cbr *packet,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!buffer || !tracking || !options)
+ return diag("error tracking time", offset, -pte_internal);
+
+ if (!options->no_tcal) {
+ errcode = tracking->in_header ?
+ pt_tcal_header_cbr(&tracking->tcal, packet, config) :
+ pt_tcal_update_cbr(&tracking->tcal, packet, config);
+ if (errcode < 0)
+ diag("error calibrating time", offset, errcode);
+ }
+
+ errcode = pt_time_update_cbr(&tracking->time, packet, config);
+ if (errcode < 0)
+ diag("error updating time", offset, errcode);
+
+ /* There is no timing update at this packet. */
+ buffer->skip_time = 1;
+
+ return track_time(buffer, tracking, offset, options);
+}
+
+static int track_tma(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct pt_packet_tma *packet,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!buffer || !tracking || !options)
+ return diag("error tracking time", offset, -pte_internal);
+
+ if (!options->no_tcal) {
+ errcode = pt_tcal_update_tma(&tracking->tcal, packet, config);
+ if (errcode < 0)
+ diag("error calibrating time", offset, errcode);
+ }
+
+ errcode = pt_time_update_tma(&tracking->time, packet, config);
+ if (errcode < 0)
+ diag("error updating time", offset, errcode);
+
+ /* There is no calibration update at this packet. */
+ buffer->skip_tcal = 1;
+
+ return track_time(buffer, tracking, offset, options);
+}
+
+static int track_mtc(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct pt_packet_mtc *packet,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!buffer || !tracking || !options)
+ return diag("error tracking time", offset, -pte_internal);
+
+ if (!options->no_tcal) {
+ errcode = pt_tcal_update_mtc(&tracking->tcal, packet, config);
+ if (errcode < 0)
+ diag("error calibrating time", offset, errcode);
+ }
+
+ errcode = pt_time_update_mtc(&tracking->time, packet, config);
+ if (errcode < 0)
+ diag("error updating time", offset, errcode);
+
+ return track_time(buffer, tracking, offset, options);
+}
+
+static int track_cyc(struct ptdump_buffer *buffer,
+ struct ptdump_tracking *tracking, uint64_t offset,
+ const struct pt_packet_cyc *packet,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ uint64_t fcr;
+ int errcode;
+
+ if (!buffer || !tracking || !options)
+ return diag("error tracking time", offset, -pte_internal);
+
+ /* Initialize to zero in case of calibration errors. */
+ fcr = 0ull;
+
+ if (!options->no_tcal) {
+ errcode = pt_tcal_fcr(&fcr, &tracking->tcal);
+ if (errcode < 0)
+ diag("calibration error", offset, errcode);
+
+ errcode = pt_tcal_update_cyc(&tracking->tcal, packet, config);
+ if (errcode < 0)
+ diag("error calibrating time", offset, errcode);
+ }
+
+ errcode = pt_time_update_cyc(&tracking->time, packet, config, fcr);
+ if (errcode < 0)
+ diag("error updating time", offset, errcode);
+ else if (!fcr)
+ diag("error updating time: no calibration", offset, 0);
+
+ /* There is no calibration update at this packet. */
+ buffer->skip_tcal = 1;
+
+ return track_time(buffer, tracking, offset, options);
+}
+
+static uint64_t sext(uint64_t val, uint8_t sign)
+{
+ uint64_t signbit, mask;
+
+ signbit = 1ull << (sign - 1);
+ mask = ~0ull << sign;
+
+ return val & signbit ? val | mask : val & ~mask;
+}
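+
+/* For example (illustrative): sext(0x8000ull, 16) sets the upper bits and
+ * yields 0xffffffffffff8000ull, whereas sext(0x7fffull, 16) stays 0x7fffull.
+ */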
+
+static int print_ip_payload(struct ptdump_buffer *buffer, uint64_t offset,
+ const struct pt_packet_ip *packet)
+{
+ if (!buffer || !packet)
+ return diag("error printing payload", offset, -pte_internal);
+
+ switch (packet->ipc) {
+ case pt_ipc_suppressed:
+ print_field(buffer->payload.standard, "%x: ????????????????",
+ pt_ipc_suppressed);
+ return 0;
+
+ case pt_ipc_update_16:
+ print_field(buffer->payload.standard, "%x: ????????????%04"
+ PRIx64, pt_ipc_update_16, packet->ip);
+ return 0;
+
+ case pt_ipc_update_32:
+ print_field(buffer->payload.standard, "%x: ????????%08"
+ PRIx64, pt_ipc_update_32, packet->ip);
+ return 0;
+
+ case pt_ipc_update_48:
+ print_field(buffer->payload.standard, "%x: ????%012"
+ PRIx64, pt_ipc_update_48, packet->ip);
+ return 0;
+
+ case pt_ipc_sext_48:
+ print_field(buffer->payload.standard, "%x: %016" PRIx64,
+ pt_ipc_sext_48, sext(packet->ip, 48));
+ return 0;
+
+ case pt_ipc_full:
+ print_field(buffer->payload.standard, "%x: %016" PRIx64,
+ pt_ipc_full, packet->ip);
+ return 0;
+ }
+
+ print_field(buffer->payload.standard, "%x: %016" PRIx64,
+ packet->ipc, packet->ip);
+ return diag("bad ipc", offset, -pte_bad_packet);
+}
+
+static int print_tnt_payload(struct ptdump_buffer *buffer, uint64_t offset,
+ const struct pt_packet_tnt *packet)
+{
+ uint64_t tnt;
+ uint8_t bits;
+ char *begin, *end;
+
+ if (!buffer || !packet)
+ return diag("error printing payload", offset, -pte_internal);
+
+ bits = packet->bit_size;
+ tnt = packet->payload;
+
+ begin = buffer->payload.extended;
+ end = begin + bits;
+
+ if (sizeof(buffer->payload.extended) < bits) {
+ diag("truncating tnt payload", offset, 0);
+
+ end = begin + sizeof(buffer->payload.extended);
+ }
+
+ for (; begin < end; ++begin, --bits)
+ *begin = tnt & (1ull << (bits - 1)) ? '!' : '.';
+
+ return 0;
+}
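+
+/* The TNT payload is printed one character per bit, starting with the most
+ * significant payload bit: '!' for a taken branch, '.' for a not-taken
+ * branch.  For example (illustrative), a 4-bit payload of 0xa prints "!.!.".
+ */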
+
+static const char *print_exec_mode(const struct pt_packet_mode_exec *packet,
+ uint64_t offset)
+{
+ enum pt_exec_mode mode;
+
+ mode = pt_get_exec_mode(packet);
+ switch (mode) {
+ case ptem_64bit:
+ return "64-bit";
+
+ case ptem_32bit:
+ return "32-bit";
+
+ case ptem_16bit:
+ return "16-bit";
+
+ case ptem_unknown:
+ return "unknown";
+ }
+
+ diag("bad exec mode", offset, -pte_bad_packet);
+ return "invalid";
+}
+
+static const char *print_pwrx_wr(const struct pt_packet_pwrx *packet)
+{
+ const char *wr;
+
+ if (!packet)
+ return "err";
+
+ wr = NULL;
+ if (packet->interrupt)
+ wr = "int";
+
+ if (packet->store) {
+ if (wr)
+ return NULL;
+ wr = " st";
+ }
+
+ if (packet->autonomous) {
+ if (wr)
+ return NULL;
+ wr = " hw";
+ }
+
+ if (!wr)
+ wr = "bad";
+
+ return wr;
+}
+
+static int print_packet(struct ptdump_buffer *buffer, uint64_t offset,
+ const struct pt_packet *packet,
+ struct ptdump_tracking *tracking,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ if (!buffer || !packet || !tracking || !options)
+ return diag("error printing packet", offset, -pte_internal);
+
+ switch (packet->type) {
+ case ppt_unknown:
+ print_field(buffer->opcode, "<unknown>");
+ return 0;
+
+ case ppt_invalid:
+ print_field(buffer->opcode, "<invalid>");
+ return 0;
+
+ case ppt_psb:
+ print_field(buffer->opcode, "psb");
+
+ tracking->in_header = 1;
+ return 0;
+
+ case ppt_psbend:
+ print_field(buffer->opcode, "psbend");
+
+ tracking->in_header = 0;
+ return 0;
+
+ case ppt_pad:
+ print_field(buffer->opcode, "pad");
+
+ if (options->no_pad)
+ buffer->skip = 1;
+ return 0;
+
+ case ppt_ovf:
+ print_field(buffer->opcode, "ovf");
+ return 0;
+
+ case ppt_stop:
+ print_field(buffer->opcode, "stop");
+ return 0;
+
+ case ppt_fup:
+ print_field(buffer->opcode, "fup");
+ print_ip_payload(buffer, offset, &packet->payload.ip);
+
+ if (options->show_last_ip)
+ track_last_ip(buffer, &tracking->last_ip, offset,
+ &packet->payload.ip, options, config);
+ return 0;
+
+ case ppt_tip:
+ print_field(buffer->opcode, "tip");
+ print_ip_payload(buffer, offset, &packet->payload.ip);
+
+ if (options->show_last_ip)
+ track_last_ip(buffer, &tracking->last_ip, offset,
+ &packet->payload.ip, options, config);
+ return 0;
+
+ case ppt_tip_pge:
+ print_field(buffer->opcode, "tip.pge");
+ print_ip_payload(buffer, offset, &packet->payload.ip);
+
+ if (options->show_last_ip)
+ track_last_ip(buffer, &tracking->last_ip, offset,
+ &packet->payload.ip, options, config);
+ return 0;
+
+ case ppt_tip_pgd:
+ print_field(buffer->opcode, "tip.pgd");
+ print_ip_payload(buffer, offset, &packet->payload.ip);
+
+ if (options->show_last_ip)
+ track_last_ip(buffer, &tracking->last_ip, offset,
+ &packet->payload.ip, options, config);
+ return 0;
+
+ case ppt_pip:
+ print_field(buffer->opcode, "pip");
+ print_field(buffer->payload.standard, "%" PRIx64 "%s",
+ packet->payload.pip.cr3,
+ packet->payload.pip.nr ? ", nr" : "");
+
+ print_field(buffer->tracking.id, "cr3");
+ print_field(buffer->tracking.payload, "%016" PRIx64,
+ packet->payload.pip.cr3);
+ return 0;
+
+ case ppt_vmcs:
+ print_field(buffer->opcode, "vmcs");
+ print_field(buffer->payload.standard, "%" PRIx64,
+ packet->payload.vmcs.base);
+
+ print_field(buffer->tracking.id, "vmcs");
+ print_field(buffer->tracking.payload, "%016" PRIx64,
+ packet->payload.vmcs.base);
+ return 0;
+
+ case ppt_tnt_8:
+ print_field(buffer->opcode, "tnt.8");
+ return print_tnt_payload(buffer, offset, &packet->payload.tnt);
+
+ case ppt_tnt_64:
+ print_field(buffer->opcode, "tnt.64");
+ return print_tnt_payload(buffer, offset, &packet->payload.tnt);
+
+ case ppt_mode: {
+ const struct pt_packet_mode *mode;
+
+ mode = &packet->payload.mode;
+ switch (mode->leaf) {
+ case pt_mol_exec: {
+ const char *csd, *csl, *sep;
+
+ csd = mode->bits.exec.csd ? "cs.d" : "";
+ csl = mode->bits.exec.csl ? "cs.l" : "";
+
+ sep = csd[0] && csl[0] ? ", " : "";
+
+ print_field(buffer->opcode, "mode.exec");
+ print_field(buffer->payload.standard, "%s%s%s",
+ csd, sep, csl);
+
+ if (options->show_exec_mode) {
+ const char *em;
+
+ em = print_exec_mode(&mode->bits.exec, offset);
+ print_field(buffer->tracking.id, "em");
+ print_field(buffer->tracking.payload, "%s", em);
+ }
+ }
+ return 0;
+
+ case pt_mol_tsx: {
+ const char *intx, *abrt, *sep;
+
+ intx = mode->bits.tsx.intx ? "intx" : "";
+ abrt = mode->bits.tsx.abrt ? "abrt" : "";
+
+ sep = intx[0] && abrt[0] ? ", " : "";
+
+ print_field(buffer->opcode, "mode.tsx");
+ print_field(buffer->payload.standard, "%s%s%s",
+ intx, sep, abrt);
+ }
+ return 0;
+ }
+
+ print_field(buffer->opcode, "mode");
+ print_field(buffer->payload.standard, "leaf: %x", mode->leaf);
+
+ return diag("unknown mode leaf", offset, 0);
+ }
+
+ case ppt_tsc:
+ print_field(buffer->opcode, "tsc");
+ print_field(buffer->payload.standard, "%" PRIx64,
+ packet->payload.tsc.tsc);
+
+ if (options->track_time)
+ track_tsc(buffer, tracking, offset,
+ &packet->payload.tsc, options, config);
+
+ if (options->no_timing)
+ buffer->skip = 1;
+
+ return 0;
+
+ case ppt_cbr:
+ print_field(buffer->opcode, "cbr");
+ print_field(buffer->payload.standard, "%x",
+ packet->payload.cbr.ratio);
+
+ if (options->track_time)
+ track_cbr(buffer, tracking, offset,
+ &packet->payload.cbr, options, config);
+
+ if (options->no_timing)
+ buffer->skip = 1;
+
+ return 0;
+
+ case ppt_tma:
+ print_field(buffer->opcode, "tma");
+ print_field(buffer->payload.standard, "%x, %x",
+ packet->payload.tma.ctc, packet->payload.tma.fc);
+
+ if (options->track_time)
+ track_tma(buffer, tracking, offset,
+ &packet->payload.tma, options, config);
+
+ if (options->no_timing)
+ buffer->skip = 1;
+
+ return 0;
+
+ case ppt_mtc:
+ print_field(buffer->opcode, "mtc");
+ print_field(buffer->payload.standard, "%x",
+ packet->payload.mtc.ctc);
+
+ if (options->track_time)
+ track_mtc(buffer, tracking, offset,
+ &packet->payload.mtc, options, config);
+
+ if (options->no_timing)
+ buffer->skip = 1;
+
+ return 0;
+
+ case ppt_cyc:
+ print_field(buffer->opcode, "cyc");
+ print_field(buffer->payload.standard, "%" PRIx64,
+ packet->payload.cyc.value);
+
+ if (options->track_time && !options->no_cyc)
+ track_cyc(buffer, tracking, offset,
+ &packet->payload.cyc, options, config);
+
+ if (options->no_timing || options->no_cyc)
+ buffer->skip = 1;
+
+ return 0;
+
+ case ppt_mnt:
+ print_field(buffer->opcode, "mnt");
+ print_field(buffer->payload.standard, "%" PRIx64,
+ packet->payload.mnt.payload);
+ return 0;
+
+ case ppt_exstop:
+ print_field(buffer->opcode, "exstop");
+ print_field(buffer->payload.standard, "%s",
+ packet->payload.exstop.ip ? "ip" : "");
+ return 0;
+
+ case ppt_mwait:
+ print_field(buffer->opcode, "mwait");
+ print_field(buffer->payload.standard, "%08x, %08x",
+ packet->payload.mwait.hints,
+ packet->payload.mwait.ext);
+ return 0;
+
+ case ppt_pwre:
+ print_field(buffer->opcode, "pwre");
+ print_field(buffer->payload.standard, "c%u.%u%s",
+ (packet->payload.pwre.state + 1) & 0xf,
+ (packet->payload.pwre.sub_state + 1) & 0xf,
+ packet->payload.pwre.hw ? ", hw" : "");
+ return 0;
+
+ case ppt_pwrx: {
+ const char *wr;
+
+ wr = print_pwrx_wr(&packet->payload.pwrx);
+ if (!wr)
+ wr = "bad";
+
+ print_field(buffer->opcode, "pwrx");
+ print_field(buffer->payload.standard, "%s: c%u, c%u", wr,
+ (packet->payload.pwrx.last + 1) & 0xf,
+ (packet->payload.pwrx.deepest + 1) & 0xf);
+ return 0;
+ }
+
+ case ppt_ptw:
+ print_field(buffer->opcode, "ptw");
+ print_field(buffer->payload.standard, "%x: %" PRIx64 "%s",
+ packet->payload.ptw.plc,
+ packet->payload.ptw.payload,
+ packet->payload.ptw.ip ? ", ip" : "");
+
+ return 0;
+ }
+
+ return diag("unknown packet", offset, -pte_bad_opc);
+}
+
+static int dump_one_packet(uint64_t offset, const struct pt_packet *packet,
+ struct ptdump_tracking *tracking,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ struct ptdump_buffer buffer;
+ int errcode;
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ print_field(buffer.offset, "%016" PRIx64, offset);
+
+ if (options->show_raw_bytes) {
+ errcode = print_raw(&buffer, offset, packet, config);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ errcode = print_packet(&buffer, offset, packet, tracking, options,
+ config);
+ if (errcode < 0)
+ return errcode;
+
+ return print_buffer(&buffer, offset, options);
+}
+
+static int dump_packets(struct pt_packet_decoder *decoder,
+ struct ptdump_tracking *tracking,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ uint64_t offset;
+ int errcode;
+
+ offset = 0ull;
+ for (;;) {
+ struct pt_packet packet;
+
+ errcode = pt_pkt_get_offset(decoder, &offset);
+ if (errcode < 0)
+ return diag("error getting offset", offset, errcode);
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0) {
+ if (errcode == -pte_eos)
+ return 0;
+
+ return diag("error decoding packet", offset, errcode);
+ }
+
+ errcode = dump_one_packet(offset, &packet, tracking, options,
+ config);
+ if (errcode < 0)
+ return errcode;
+ }
+}
+
+static int dump_sync(struct pt_packet_decoder *decoder,
+ struct ptdump_tracking *tracking,
+ const struct ptdump_options *options,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!options)
+ return diag("setup error", 0ull, -pte_internal);
+
+ if (options->no_sync) {
+ errcode = pt_pkt_sync_set(decoder, 0ull);
+ if (errcode < 0)
+ return diag("sync error", 0ull, errcode);
+ } else {
+ errcode = pt_pkt_sync_forward(decoder);
+ if (errcode < 0) {
+ if (errcode == -pte_eos)
+ return 0;
+
+ return diag("sync error", 0ull, errcode);
+ }
+ }
+
+ for (;;) {
+ errcode = dump_packets(decoder, tracking, options, config);
+ if (!errcode)
+ break;
+
+ errcode = pt_pkt_sync_forward(decoder);
+ if (errcode < 0) {
+ if (errcode == -pte_eos)
+ return 0;
+
+ return diag("sync error", 0ull, errcode);
+ }
+
+ ptdump_tracking_reset(tracking);
+ }
+
+ return errcode;
+}
+
+static int dump(struct ptdump_tracking *tracking,
+ const struct pt_config *config,
+ const struct ptdump_options *options)
+{
+ struct pt_packet_decoder *decoder;
+ int errcode;
+
+ decoder = pt_pkt_alloc_decoder(config);
+ if (!decoder)
+ return diag("failed to allocate decoder", 0ull, 0);
+
+ errcode = dump_sync(decoder, tracking, options, config);
+
+ pt_pkt_free_decoder(decoder);
+
+ if (errcode < 0)
+ return errcode;
+
+#if defined(FEATURE_SIDEBAND)
+ errcode = pt_sb_dump(tracking->session, stdout, options->sb_dump_flags,
+ UINT64_MAX);
+ if (errcode < 0)
+ return diag("sideband dump error", UINT64_MAX, errcode);
+#endif
+
+ return 0;
+}
+
+#if defined(FEATURE_SIDEBAND)
+
+static int ptdump_print_error(int errcode, const char *filename,
+ uint64_t offset, void *priv)
+{
+ const struct ptdump_options *options;
+ const char *errstr;
+
+ options = (struct ptdump_options *) priv;
+ if (!options)
+ return -pte_internal;
+
+ if (errcode >= 0 && !options->print_sb_warnings)
+ return 0;
+
+ if (!filename)
+ filename = "<unknown>";
+
+ errstr = errcode < 0
+ ? pt_errstr(pt_errcode(errcode))
+ : pt_sb_errstr((enum pt_sb_error_code) errcode);
+
+ if (!errstr)
+ errstr = "<unknown error>";
+
+ printf("[%s:%016" PRIx64 " sideband error: %s]\n", filename, offset,
+ errstr);
+
+ return 0;
+}
+
+#if defined(FEATURE_PEVENT)
+
+static int ptdump_sb_pevent(struct pt_sb_session *session, char *filename,
+ const struct pt_sb_pevent_config *conf,
+ const char *prog)
+{
+ struct pt_sb_pevent_config config;
+ uint64_t foffset, fsize, fend;
+ int errcode;
+
+ if (!conf || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "");
+ return -1;
+ }
+
+ errcode = preprocess_filename(filename, &foffset, &fsize);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: bad file %s: %s.\n", prog, filename,
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ if (SIZE_MAX < foffset) {
+ fprintf(stderr,
+ "%s: bad offset: 0x%" PRIx64 ".\n", prog, foffset);
+ return -1;
+ }
+
+ config = *conf;
+ config.filename = filename;
+ config.begin = (size_t) foffset;
+ config.end = 0;
+
+ if (fsize) {
+ fend = foffset + fsize;
+ if ((fend <= foffset) || (SIZE_MAX < fend)) {
+ fprintf(stderr,
+ "%s: bad range: 0x%" PRIx64 "-0x%" PRIx64 ".\n",
+ prog, foffset, fend);
+ return -1;
+ }
+
+ config.end = (size_t) fend;
+ }
+
+ errcode = pt_sb_alloc_pevent_decoder(session, &config);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: error loading %s: %s.\n", prog, filename,
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+
+static int get_arg_uint64(uint64_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ char *rest;
+
+ if (!value || !option || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "?");
+ return 0;
+ }
+
+ if (!arg || arg[0] == 0 || (arg[0] == '-' && arg[1] == '-')) {
+ fprintf(stderr, "%s: %s: missing argument.\n", prog, option);
+ return 0;
+ }
+
+ errno = 0;
+ *value = strtoull(arg, &rest, 0);
+ if (errno || *rest) {
+ fprintf(stderr, "%s: %s: bad argument: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int get_arg_uint32(uint32_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ uint64_t val;
+
+ if (!get_arg_uint64(&val, option, arg, prog))
+ return 0;
+
+ if (val > UINT32_MAX) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ *value = (uint32_t) val;
+
+ return 1;
+}
+
+#if defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT)
+
+static int get_arg_uint16(uint16_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ uint64_t val;
+
+ if (!get_arg_uint64(&val, option, arg, prog))
+ return 0;
+
+ if (val > UINT16_MAX) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ *value = (uint16_t) val;
+
+ return 1;
+}
+
+#endif /* defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT) */
+
+static int get_arg_uint8(uint8_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ uint64_t val;
+
+ if (!get_arg_uint64(&val, option, arg, prog))
+ return 0;
+
+ if (val > UINT8_MAX) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ *value = (uint8_t) val;
+
+ return 1;
+}
+
+static int process_args(int argc, char *argv[],
+ struct ptdump_tracking *tracking,
+ struct ptdump_options *options,
+ struct pt_config *config, char **ptfile)
+{
+#if defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT)
+ struct pt_sb_pevent_config pevent;
+#endif
+ int idx, errcode;
+
+ if (!argv || !tracking || !options || !config || !ptfile) {
+ fprintf(stderr, "%s: internal error.\n", argv ? argv[0] : "");
+ return -1;
+ }
+
+#if defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT)
+ memset(&pevent, 0, sizeof(pevent));
+ pevent.size = sizeof(pevent);
+ pevent.time_mult = 1;
+#endif
+ for (idx = 1; idx < argc; ++idx) {
+ if (strncmp(argv[idx], "-", 1) != 0) {
+ *ptfile = argv[idx];
+ if (idx < (argc-1))
+ return usage(argv[0]);
+ break;
+ }
+
+ if (strcmp(argv[idx], "-h") == 0)
+ return help(argv[0]);
+ if (strcmp(argv[idx], "--help") == 0)
+ return help(argv[0]);
+ if (strcmp(argv[idx], "--version") == 0)
+ return version(argv[0]);
+ if (strcmp(argv[idx], "--no-sync") == 0)
+ options->no_sync = 1;
+ else if (strcmp(argv[idx], "--quiet") == 0) {
+ options->quiet = 1;
+#if defined(FEATURE_SIDEBAND)
+ options->sb_dump_flags = 0;
+#endif
+ } else if (strcmp(argv[idx], "--no-pad") == 0)
+ options->no_pad = 1;
+ else if (strcmp(argv[idx], "--no-timing") == 0)
+ options->no_timing = 1;
+ else if (strcmp(argv[idx], "--no-cyc") == 0)
+ options->no_cyc = 1;
+ else if (strcmp(argv[idx], "--no-offset") == 0)
+ options->show_offset = 0;
+ else if (strcmp(argv[idx], "--raw") == 0)
+ options->show_raw_bytes = 1;
+ else if (strcmp(argv[idx], "--lastip") == 0)
+ options->show_last_ip = 1;
+ else if (strcmp(argv[idx], "--exec-mode") == 0)
+ options->show_exec_mode = 1;
+ else if (strcmp(argv[idx], "--time") == 0) {
+ if (options->show_tcal) {
+ fprintf(stderr, "%s: specify either --time "
+ "or --tcal.\n", argv[0]);
+ return -1;
+ }
+
+ options->track_time = 1;
+ options->show_time = 1;
+ } else if (strcmp(argv[idx], "--time-delta") == 0) {
+ options->show_time_as_delta = 1;
+ } else if (strcmp(argv[idx], "--tcal") == 0) {
+ if (options->show_time) {
+ fprintf(stderr, "%s: specify either --time "
+ "or --tcal.\n", argv[0]);
+ return -1;
+ }
+
+ options->track_time = 1;
+ options->show_tcal = 1;
+ } else if (strcmp(argv[idx], "--no-tcal") == 0)
+ options->no_tcal = 1;
+ else if (strcmp(argv[idx], "--no-wall-clock") == 0)
+ options->no_wall_clock = 1;
+#if defined(FEATURE_SIDEBAND)
+ else if ((strcmp(argv[idx], "--sb:compact") == 0) ||
+ (strcmp(argv[idx], "--sb") == 0)) {
+ options->sb_dump_flags &= ~ptsbp_verbose;
+ options->sb_dump_flags |= ptsbp_compact;
+ } else if (strcmp(argv[idx], "--sb:verbose") == 0) {
+ options->sb_dump_flags &= ~ptsbp_compact;
+ options->sb_dump_flags |= ptsbp_verbose;
+ } else if (strcmp(argv[idx], "--sb:filename") == 0)
+ options->sb_dump_flags |= ptsbp_filename;
+ else if (strcmp(argv[idx], "--sb:offset") == 0)
+ options->sb_dump_flags |= ptsbp_file_offset;
+ else if (strcmp(argv[idx], "--sb:time") == 0)
+ options->sb_dump_flags |= ptsbp_tsc;
+ else if (strcmp(argv[idx], "--sb:warn") == 0)
+ options->print_sb_warnings = 1;
+#if defined(FEATURE_PEVENT)
+ else if ((strcmp(argv[idx], "--pevent") == 0) ||
+ (strcmp(argv[idx], "--pevent:primary") == 0) ||
+ (strcmp(argv[idx], "--pevent:secondary") == 0)) {
+ char *arg;
+
+ arg = argv[++idx];
+ if (!arg) {
+ fprintf(stderr,
+ "%s: %s: missing argument.\n",
+ argv[0], argv[idx-1]);
+ return -1;
+ }
+
+ errcode = ptdump_sb_pevent(tracking->session, arg,
+ &pevent, argv[0]);
+ if (errcode < 0)
+ return -1;
+
+ /* We need to keep track of time for sideband
+ * correlation.
+ */
+ options->track_time = 1;
+ } else if (strcmp(argv[idx], "--pevent:sample-type") == 0) {
+ if (!get_arg_uint64(&pevent.sample_type,
+ "--pevent:sample-type",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--pevent:time-zero") == 0) {
+ if (!get_arg_uint64(&pevent.time_zero,
+ "--pevent:time-zero",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--pevent:time-shift") == 0) {
+ if (!get_arg_uint16(&pevent.time_shift,
+ "--pevent:time-shift",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--pevent:time-mult") == 0) {
+ if (!get_arg_uint32(&pevent.time_mult,
+ "--pevent:time-mult",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--pevent:tsc-offset") == 0) {
+ if (!get_arg_uint64(&pevent.tsc_offset,
+ "--pevent:tsc-offset",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--pevent:kernel-start") == 0) {
+ if (!get_arg_uint64(&pevent.kernel_start,
+ "--pevent:kernel-start",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if ((strcmp(argv[idx], "--pevent:sysroot") == 0) ||
+ (strcmp(argv[idx], "--pevent:kcore") == 0) ||
+ (strcmp(argv[idx], "--pevent:vdso-x64") == 0) ||
+ (strcmp(argv[idx], "--pevent:vdso-x32") == 0) ||
+ (strcmp(argv[idx], "--pevent:vdso-ia32") == 0)) {
+ char *arg;
+
+ arg = argv[++idx];
+ if (!arg) {
+ fprintf(stderr,
+ "%s: %s: missing argument.\n",
+ argv[0], argv[idx-1]);
+ return -1;
+ }
+
+ /* Ignore. */
+ }
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+ else if (strcmp(argv[idx], "--cpu") == 0) {
+ const char *arg;
+
+ arg = argv[++idx];
+ if (!arg) {
+ fprintf(stderr,
+ "%s: --cpu: missing argument.\n",
+ argv[0]);
+ return -1;
+ }
+
+ if (strcmp(arg, "auto") == 0) {
+ errcode = pt_cpu_read(&config->cpu);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: error reading cpu: %s.\n",
+ argv[0],
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+ continue;
+ }
+
+ if (strcmp(arg, "none") == 0) {
+ memset(&config->cpu, 0, sizeof(config->cpu));
+ continue;
+ }
+
+ errcode = pt_cpu_parse(&config->cpu, arg);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: cpu must be specified as f/m[/s]\n",
+ argv[0]);
+ return -1;
+ }
+ } else if (strcmp(argv[idx], "--mtc-freq") == 0) {
+ if (!get_arg_uint8(&config->mtc_freq, "--mtc-freq",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--nom-freq") == 0) {
+ if (!get_arg_uint8(&config->nom_freq, "--nom-freq",
+ argv[++idx], argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--cpuid-0x15.eax") == 0) {
+ if (!get_arg_uint32(&config->cpuid_0x15_eax,
+ "--cpuid-0x15.eax", argv[++idx],
+ argv[0]))
+ return -1;
+ } else if (strcmp(argv[idx], "--cpuid-0x15.ebx") == 0) {
+ if (!get_arg_uint32(&config->cpuid_0x15_ebx,
+ "--cpuid-0x15.ebx", argv[++idx],
+ argv[0]))
+ return -1;
+ } else
+ return unknown_option_error(argv[idx], argv[0]);
+ }
+
+ return 0;
+}
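+
+/* Example invocation (illustrative only; the trace file name is made up):
+ *
+ *     ptdump --cpu auto --time --raw trace.pt
+ *
+ * This would read the cpu identification from the local system (--cpu auto),
+ * track and show timing (--time), and print the raw packet bytes (--raw),
+ * per the option handling in process_args() above.
+ */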
+
+int main(int argc, char *argv[])
+{
+ struct ptdump_tracking tracking;
+ struct ptdump_options options;
+ struct pt_config config;
+ int errcode;
+ char *ptfile;
+ uint64_t pt_offset, pt_size;
+
+ ptfile = NULL;
+
+ memset(&options, 0, sizeof(options));
+ options.show_offset = 1;
+
+ memset(&config, 0, sizeof(config));
+ pt_config_init(&config);
+
+ ptdump_tracking_init(&tracking);
+
+#if defined(FEATURE_SIDEBAND)
+ tracking.session = pt_sb_alloc(NULL);
+ if (!tracking.session) {
+ fprintf(stderr,
+ "%s: failed to allocate sideband session.\n", argv[0]);
+ errcode = -pte_nomem;
+ goto out;
+ }
+
+ pt_sb_notify_error(tracking.session, ptdump_print_error, &options);
+#endif /* defined(FEATURE_SIDEBAND) */
+
+ errcode = process_args(argc, argv, &tracking, &options, &config,
+ &ptfile);
+ if (errcode != 0) {
+ if (errcode > 0)
+ errcode = 0;
+ goto out;
+ }
+
+ if (!ptfile) {
+ errcode = no_file_error(argv[0]);
+ goto out;
+ }
+
+ errcode = preprocess_filename(ptfile, &pt_offset, &pt_size);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: bad file %s: %s.\n", argv[0], ptfile,
+ pt_errstr(pt_errcode(errcode)));
+ goto out;
+ }
+
+ if (config.cpu.vendor) {
+ errcode = pt_cpu_errata(&config.errata, &config.cpu);
+ if (errcode < 0)
+ diag("failed to determine errata", 0ull, errcode);
+ }
+
+ errcode = load_pt(&config, ptfile, pt_offset, pt_size, argv[0]);
+ if (errcode < 0)
+ goto out;
+
+#if defined(FEATURE_SIDEBAND)
+ errcode = pt_sb_init_decoders(tracking.session);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: error initializing sideband decoders: %s.\n",
+ argv[0], pt_errstr(pt_errcode(errcode)));
+ goto out;
+ }
+#endif /* defined(FEATURE_SIDEBAND) */
+
+ errcode = dump(&tracking, &config, &options);
+
+out:
+ free(config.begin);
+ ptdump_tracking_fini(&tracking);
+
+ return -errcode;
+}
diff --git a/pttc/CMakeLists.txt b/pttc/CMakeLists.txt
new file mode 100644
index 000000000000..e569355a6fc8
--- /dev/null
+++ b/pttc/CMakeLists.txt
@@ -0,0 +1,68 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+include_directories(
+ include
+ ../libipt/internal/include
+)
+
+set(PTTC_FILES
+ src/errcode.c
+ src/file.c
+ src/parse.c
+ src/pttc.c
+ src/util.c
+ src/yasm.c
+ ../libipt/src/pt_cpu.c
+)
+
+if (CMAKE_HOST_UNIX)
+ set(PTTC_FILES
+ ${PTTC_FILES}
+ src/posix/util.c
+ ../libipt/src/posix/pt_cpuid.c
+ )
+endif (CMAKE_HOST_UNIX)
+
+if (CMAKE_HOST_WIN32)
+ set(PTTC_FILES
+ ${PTTC_FILES}
+ src/windows/util.c
+ ../libipt/src/windows/pt_cpuid.c
+ )
+endif (CMAKE_HOST_WIN32)
+
+add_executable(pttc
+ ${PTTC_FILES}
+
+ src/main.c
+)
+
+target_link_libraries(pttc libipt)
+
+if (PEVENT)
+ target_link_libraries(pttc pevent)
+endif (PEVENT)
diff --git a/pttc/include/errcode.h b/pttc/include/errcode.h
new file mode 100644
index 000000000000..4912825284a3
--- /dev/null
+++ b/pttc/include/errcode.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ERRCODE_H
+#define ERRCODE_H
+
+/* Error codes. */
+enum errcode {
+ success,
+
+ err_file_open,
+ err_file_read,
+ err_file_size,
+ err_file_write,
+ err_out_of_range,
+
+ err_label_addr,
+ err_no_org_directive,
+ err_no_directive,
+ err_no_label,
+ err_label_name,
+ err_label_not_unique,
+
+ err_section_no_name,
+ err_section_attribute_no_value,
+ err_section_unknown_attribute,
+
+ err_missing_closepar,
+ err_missing_openpar,
+
+ err_parse,
+ err_parse_int,
+ err_parse_int_too_big,
+ err_parse_ipc,
+ err_parse_ip_missing,
+ err_parse_no_args,
+ err_parse_trailing_tokens,
+ err_parse_unknown_char,
+ err_parse_unknown_directive,
+ err_parse_missing_directive,
+
+ err_parse_c_state_sub,
+ err_parse_c_state_invalid,
+
+ err_sb_missing,
+ err_sb_mix,
+ err_sb_final,
+
+ err_pt_lib,
+
+ err_run,
+
+ err_other,
+
+ err_no_mem,
+
+ /* Used for all invalid function arguments. */
+ err_internal,
+
+ /* Special return value used in p_process to signal that the
+ * rest of the file should go into a .exp file.
+ */
+ stop_process,
+
+ /* Maximum error code.
+ *
+ * This must always be the last element in the enum.
+ * It must not be used as an error code.
+ */
+ err_max
+};
+
+/* Map error codes to descriptions.
+ *
+ * Note, all error codes that are returned by functions are negative, so
+ * error codes usually must be negated when accessing this array.
+ */
+extern const char *errstr[];
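+
+/* A minimal usage sketch, assuming an error code obtained from one of the
+ * pttc functions that return a negative enum errcode (the call below is
+ * purely illustrative):
+ *
+ *     errcode = some_pttc_call(...);
+ *     if (errcode < 0)
+ *             fprintf(stderr, "error: %s\n", errstr[-errcode]);
+ */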
+
+#endif /* ERRCODE_H */
diff --git a/pttc/include/file.h b/pttc/include/file.h
new file mode 100644
index 000000000000..d6c64f4de080
--- /dev/null
+++ b/pttc/include/file.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FILE_H
+#define FILE_H
+
+#include <stddef.h>
+
+/* Provides linewise access to a string.
+ * Access to the lines is guarded by the text_line function.
+ */
+struct text {
+ /* Number of lines. */
+ size_t n;
+
+ /* Each line[0] to line[n-1] points to the start of the
+ * corresponding line.
+ */
+ char **line;
+};
+
+/* Allocates new text.
+ *
+ * Note, if s is NULL or the empty string the text has zero lines.
+ *
+ * Returns a non-NULL text object on success; NULL otherwise.
+ */
+extern struct text *text_alloc(const char *s);
+
+/* Deallocates @t.
+ * If @t is the NULL pointer, nothing happens.
+ */
+extern void text_free(struct text *t);
+
+/* Initializes @t with @s. All "\n" or "\r\n" line endings will be
+ * replaced with '\0'.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @t is the NULL pointer.
+ */
+extern int text_parse(struct text *t, const char *s);
+
+/* Copies at most @destlen characters of line @n from text @t to @dest.
+ * The line counts start with 0.
+ * If @dest is the NULL pointer just the line number is checked.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @t is the NULL pointer or if @dest is the
+ * NULL pointer, but @destlen is non-zero.
+ * Returns -err_out_of_range if @n is not in the range.
+ *
+ * Note, the string is always null byte terminated on success.
+ */
+extern int text_line(const struct text *t, char *dest, size_t destlen,
+ size_t n);
+
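+/* A minimal usage sketch (the input string is illustrative):
+ *
+ *     struct text *t = text_alloc("first\nsecond\n");
+ *     char buf[64];
+ *
+ *     if (t && text_line(t, buf, sizeof(buf), 1) == 0)
+ *             (buf now holds "second", since line counting starts at 0)
+ *     text_free(t);
+ */
+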
+/* Provides access to lines of files. Access to all files is cached
+ * after the first request.
+ *
+ * By convention, the first file_list element in the list is the head
+ * and stores no file information.
+ */
+struct file_list {
+ /* Name of the file. */
+ char *filename;
+
+ /* The content of the file. */
+ struct text *text;
+
+ /* Points to the next file list entry. It's NULL if the
+ * current file_list is the last entry in the list.
+ */
+ struct file_list *next;
+};
+
+/* Allocates a new file list.
+ *
+ * Returns a non-NULL file list object on success; NULL otherwise.
+ */
+extern struct file_list *fl_alloc(void);
+
+/* Deallocates @fl.
+ * If @fl is the NULL pointer, nothing happens.
+ */
+extern void fl_free(struct file_list *fl);
+
+/* Looks up line @n in a file @filename. The line content is stored in
+ * @dest, which should have a capacity of @destlen.
+ * If @dest is the NULL pointer just the line number is checked.
+ * See function text_line how the line is copied to @dest.
+ * The file @filename is loaded implicitly.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @fl or @filename is the NULL pointer or if
+ * @dest is the NULL pointer, but @destlen is non-zero.
+ * Returns -err_out_of_range if n is not a valid line number.
+ * Returns -err_file_stat if @filename could not be found.
+ * Returns -err_file_open if @filename could not be opened.
+ * Returns -err_file_read if the content of @filename could not be fully
+ * read.
+ */
+extern int fl_getline(struct file_list *fl, char *dest, size_t destlen,
+ const char *filename, size_t n);
+
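+/* A minimal usage sketch for the file list (the file name is illustrative):
+ *
+ *     struct file_list *fl = fl_alloc();
+ *     char line[256];
+ *
+ *     if (fl && fl_getline(fl, line, sizeof(line), "test.lst", 0) == 0)
+ *             (line now holds the first line of test.lst)
+ *     fl_free(fl);
+ */
+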
+/* Looks up the text for @filename and stores its contents in @t.
+ * The file @filename is loaded implicitly.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @fl or @t or @filename is the NULL pointer.
+ * Returns -err_file_stat if @filename could not be found.
+ * Returns -err_file_open if @filename could not be opened.
+ * Returns -err_file_read if the content of @filename could not be fully
+ * read.
+ */
+extern int fl_gettext(struct file_list *fl, const struct text **t,
+ const char *filename);
+
+#endif /* FILE_H */
diff --git a/pttc/include/parse.h b/pttc/include/parse.h
new file mode 100644
index 000000000000..08c1f7e5c27b
--- /dev/null
+++ b/pttc/include/parse.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PARSE_H
+#define PARSE_H
+
+#include "yasm.h"
+
+#include "intel-pt.h"
+
+#if defined(FEATURE_PEVENT)
+# include "pevent.h"
+#endif /* defined(FEATURE_PEVENT) */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+
+#if defined(FEATURE_SIDEBAND)
+
+/* The sideband format. */
+enum sb_format {
+ sbf_raw,
+
+#if defined(FEATURE_PEVENT)
+ sbf_pevent,
+#endif /* defined(FEATURE_PEVENT) */
+};
+
+/* A sideband file. */
+struct sb_file {
+ /* The file name. */
+ char *name;
+
+ /* The file pointer. */
+ FILE *file;
+
+ /* The sideband format. */
+ enum sb_format format;
+
+ /* The number of bytes written into the sideband file. */
+ int bytes_written;
+
+ /* Format-specific information. */
+ union {
+ /* A dummy entry. */
+ uint64_t dummy;
+
+#if defined(FEATURE_PEVENT)
+ /* format = sbf_pevent. */
+ struct {
+ /* The perf_event sideband configuration. */
+ struct pev_config config;
+
+ /* If set, the configuration can't be changed. */
+ uint32_t is_final:1;
+ } pevent;
+#endif /* defined(FEATURE_PEVENT) */
+ } variant;
+};
+
+/* A list of sideband files. */
+struct sb_filelist {
+ /* The next file in the list. */
+ struct sb_filelist *next;
+
+ /* The sideband file. */
+ struct sb_file sbfile;
+};
+
+#endif /* defined(FEATURE_SIDEBAND) */
+
+/* Represents the parser. */
+struct parser {
+ /* File pointer to the trace output file. */
+ FILE *ptfile;
+
+ /* Filename of the trace output file. The filename is
+ * determined from the .asm file given during p_alloc.
+ */
+ char *ptfilename;
+
+#if defined(FEATURE_SIDEBAND)
+ /* A list of open sideband files. */
+ struct sb_filelist *sbfiles;
+
+ /* The currently active sideband file. */
+ struct sb_file *current_sbfile;
+#endif /* defined(FEATURE_SIDEBAND) */
+
+ /* The yasm structure, initialized with pttfile in p_alloc. */
+ struct yasm *y;
+
+ /* Current pt directive. */
+ struct pt_directive *pd;
+
+ /* The encoder configuration, passed during p_alloc. */
+ const struct pt_config *conf;
+
+ /* Labels for @pt or @sb directives. */
+ struct label *pt_labels;
+
+ /* Number of bytes written to pt file. */
+ int pt_bytes_written;
+};
+
+/* Instantiates a parser, parses @pttfile, and writes the resulting PT
+ * stream using @conf.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int parse(const char *pttfile, const struct pt_config *conf);
+
+/* Parses an empty payload.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_parse_trailing_tokens if @payload has non whitespace
+ * characters.
+ */
+extern int parse_empty(char *payload);
+
+/* Parses tnt @payload. Taken branches are expressed with 't' and
+ * not-taken branches with 'n'. The t's and n's can be separated by spaces
+ * or periods, or directly concatenated.
+ *
+ * On success the TNT bitfield will be stored in the location of @tnt; the
+ * number of T's and N's is stored in the location of @size.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @payload or @tnt or @size is the NULL
+ * pointer.
+ * Returns -err_parse_unknown_char if there is an unrecognized character
+ * in the payload.
+ */
+extern int parse_tnt(uint64_t *tnt, uint8_t *size, char *payload);
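+
+/* A minimal usage sketch (the payload below is illustrative); parsing
+ * "t.n.t.t" would be expected to yield size == 4 with the corresponding
+ * taken/not-taken pattern encoded in @tnt:
+ *
+ *     uint64_t tnt;
+ *     uint8_t size;
+ *     char payload[] = "t.n.t.t";
+ *
+ *     errcode = parse_tnt(&tnt, &size, payload);
+ */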
+
+/* Parses an address and an ipc from @payload and stores them in the
+ * locations of @ip and @ipc, respectively. The ipc is separated from the
+ * address by a space or comma.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @p or @ip or @ipc is the NULL pointer.
+ * Returns -err_parse_int if ip or ipc in the @payload could not be
+ * parsed as integer.
+ * Returns -err_parse_ipc if the ipc argument is missing or malformed.
+ * Returns -err_parse_trailing_tokens if the @payload contains more than
+ * 2 arguments.
+ */
+extern int parse_ip(struct parser *p, uint64_t *ip,
+ enum pt_ip_compression *ipc, char *payload);
+
+/* Parses a uint64_t value from @payload and stores it in the memory
+ * location where @x points to.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @x is the NULL pointer.
+ * Returns -err_parse_no_args if @payload contains no arguments.
+ * Returns -err_parse_int if @payload cannot be parsed as integer.
+ */
+extern int parse_uint64(uint64_t *x, char *payload);
+
+/* Parses a uint8_t value from @payload and stores it in the memory
+ * location where @x points to.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @x is the NULL pointer.
+ * Returns -err_parse_no_args if @payload contains no arguments.
+ * Returns -err_parse_int if @payload cannot be parsed as integer.
+ * Returns -err_parse_int_too_big if the integer parsed from @payload
+ * cannot be represented in uint8_t.
+ */
+extern int parse_uint8(uint8_t *x, char *payload);
+
+/* Parses a uint16_t value from @payload and stores it in the memory
+ * location where @x points to.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @x is the NULL pointer.
+ * Returns -err_parse_no_args if @payload contains no arguments.
+ * Returns -err_parse_int if @payload cannot be parsed as integer.
+ * Returns -err_parse_int_too_big if the integer parsed from @payload
+ * cannot be represented in uint16_t.
+ */
+extern int parse_uint16(uint16_t *x, char *payload);
+
+/* Parses a uint32_t value from @payload and stores it in the memory
+ * location where @x points to.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @x is the NULL pointer.
+ * Returns -err_parse_no_args if @payload contains no arguments.
+ * Returns -err_parse_int if @payload cannot be parsed as integer.
+ * Returns -err_parse_int_too_big if the integer parsed from @payload
+ * cannot be represented in uint32_t.
+ */
+extern int parse_uint32(uint32_t *x, char *payload);
+
+/* Parses the comma-separated ctc and fc arguments of a tma packet.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @ctc or @fc is the NULL pointer.
+ * Returns -err_parse_int if ctc or fc in the @payload could not be
+ * parsed as integer.
+ * Returns -err_parse_trailing_tokens if the @payload contains more than
+ * 2 arguments.
+ */
+extern int parse_tma(uint16_t *ctc, uint16_t *fc, char *payload);
+
+#endif /* PARSE_H */
diff --git a/pttc/include/pttc.h b/pttc/include/pttc.h
new file mode 100644
index 000000000000..9b33ca345a95
--- /dev/null
+++ b/pttc/include/pttc.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PTTC_H
+#define PTTC_H
+
+#include "intel-pt.h"
+
+/* Options that are passed to pttc main. */
+struct pttc_options {
+ /* The cpu that should be used for encoding. */
+ struct pt_cpu cpu;
+
+ /* The input .ptt file. */
+ const char *pttfile;
+};
+
+/* Starts the parsing process with @asmfile.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int pttc_main(const struct pttc_options *options);
+
+#endif /* PTTC_H */
diff --git a/pttc/include/util.h b/pttc/include/util.h
new file mode 100644
index 000000000000..7f097dee6c91
--- /dev/null
+++ b/pttc/include/util.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef UTIL_H
+#define UTIL_H
+
+#include <stdint.h>
+
+/* Duplicates @s and returns a pointer to it.
+ *
+ * The returned pointer must be freed by the caller.
+ *
+ * Returns the pointer to the duplicate on success; otherwise NULL is
+ * returned.
+ */
+extern char *duplicate_str(const char *s);
+
+/* Converts the string @str into an unsigned x-bit value @val using base @base.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if either @str or @val is NULL.
+ * Returns -err_parse_int if there was a general parsing error.
+ * Returns -err_parse_int_too_big if the parsed value does not fit into x bits.
+ */
+extern int str_to_uint64(const char *str, uint64_t *val, int base);
+extern int str_to_uint32(const char *str, uint32_t *val, int base);
+extern int str_to_uint16(const char *str, uint16_t *val, int base);
+extern int str_to_uint8(const char *str, uint8_t *val, int base);
+
+/* Executes @file and passes @argv as command-line arguments.
+ * The last element in @argv must be NULL.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int run(const char *file, char *const argv[]);
+
+/* Prints condstr, together with file and line, to stderr if cond is not 0.
+ * Please do not use this function directly; use the bug_on convenience
+ * macro instead.
+ *
+ * Returns cond.
+ */
+extern int do_bug_on(int cond, const char *condstr, const char *file, int line);
+
+/* Convenience macro that wraps cond as condstr and current file and line
+ * for do_bug_on.
+ *
+ * Returns cond.
+ */
+#define bug_on(cond) do_bug_on(cond, #cond, __FILE__, __LINE__)
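+
+/* A minimal usage sketch, following the pattern used throughout pttc:
+ *
+ *     if (bug_on(!ptr))
+ *             return -err_internal;
+ *
+ * If ptr is NULL, this prints "!ptr" together with the file and line to
+ * stderr and returns -err_internal; otherwise execution continues.
+ */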
+
+/* Represents a label list with the corresponding address.
+ *
+ * By convention, the first label in the list is the head and stores
+ * no label information.
+ */
+struct label {
+ /* Labelname. */
+ char *name;
+
+ /* Address associated with the label. */
+ uint64_t addr;
+
+ /* The next label in the list. */
+ struct label *next;
+};
+
+/* Allocates a new label list.
+ *
+ * Returns a non-NULL label list object on success; NULL otherwise.
+ */
+extern struct label *l_alloc(void);
+
+/* Deallocates and clears all elements in the list denoted by @l.
+ * If @l is the NULL pointer, nothing happens.
+ */
+extern void l_free(struct label *l);
+
+/* Appends a label to the last element in @l with @name and @addr.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int l_append(struct label *l, const char *name, uint64_t addr);
+
+/* Looks up the label @name in @l and stores its address in the location
+ * @addr points to.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @l or @addr or @name is the NULL pointer.
+ * Returns -err_no_label if a label with @name does not exist in @l.
+ */
+extern int l_lookup(const struct label *l, uint64_t *addr, const char *name);
+
+/* Finds the label @name in @l and returns a pointer to it.
+ *
+ * Returns a pointer to the found label on success; NULL otherwise.
+ */
+extern struct label *l_find(struct label *l, const char *name);
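+
+/* A minimal usage sketch (label name and address are illustrative):
+ *
+ *     struct label *l = l_alloc();
+ *     uint64_t addr;
+ *
+ *     if (l && !l_append(l, "start", 0x1000ull) &&
+ *         !l_lookup(l, &addr, "start"))
+ *             (addr is now 0x1000)
+ *     l_free(l);
+ */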
+
+#endif /* UTIL_H */
diff --git a/pttc/include/yasm.h b/pttc/include/yasm.h
new file mode 100644
index 000000000000..416d1af119cd
--- /dev/null
+++ b/pttc/include/yasm.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef YASM_H
+#define YASM_H
+
+#include "file.h"
+#include "util.h"
+
+#include <stdint.h>
+
+/* Parses all labels in @t and appends them to @l.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_section if @t contains a "[section]" yasm directive.
+ * Sections are currently not supported.
+ * Returns -err_label_addr if the address for a label could not be
+ * determined.
+ */
+extern int parse_yasm_labels(struct label *l, const struct text *t);
+
+/* Modifies @s so that it can be used as a label, if @s actually looks
+ * like a label.
+ *
+ * Returns true if @s looks like a label; false otherwise.
+ * Returns -err_internal if @s is the NULL pointer.
+ */
+extern int make_label(char *s);
+
+/* Represents the state of the pt directive parser. The parser uses the
+ * canonical yasm lst file syntax to follow all asm source files that
+ * were used during a yasm run. The lst file stores information about
+ * these files in terms of line numbers and line increments. With this
+ * information the contents of the lst file can be correlated to the
+ * actual source files.
+ */
+struct state {
+ /* Current line number. */
+ int n;
+
+ /* Current line increment for this file. */
+ int inc;
+
+ /* Current filename. */
+ char *filename;
+
+ /* Pointer to the current line. */
+ char *line;
+};
+
+/* Allocates new state.
+ *
+ * Returns a non-NULL state object on success; NULL otherwise.
+ */
+extern struct state *st_alloc(void);
+
+/* Deallocates and clears all fields of @st.
+ * If @st is the NULL pointer, nothing happens.
+ */
+extern void st_free(struct state *st);
+
+/* Prints @s to stderr enriched with @st's file and line information.
+ *
+ * Returns @errcode on success.
+ * Returns -err_internal if @st is the NULL pointer or @errcode is
+ * not negative.
+ */
+extern int st_print_err(const struct state *st, const char *s, int errcode);
+
+/* The kind of directive: Intel PT or sideband. */
+enum pt_directive_kind {
+ pdk_pt,
+#if defined(FEATURE_SIDEBAND)
+ pdk_sb,
+#endif
+};
+
+/* Represents a pt directive with name and payload. */
+struct pt_directive {
+ /* The kind of the directive. */
+ enum pt_directive_kind kind;
+
+ /* Name of the directive. */
+ char *name;
+
+ /* Length of name. */
+ size_t nlen;
+
+ /* Everything between the '(' and ')' in the directive. */
+ char *payload;
+
+ /* Length of payload. */
+ size_t plen;
+};
+
+/* Allocates a new pt directive that can hold a directive name and
+ * payload of no more than @n characters.
+ *
+ * Returns a non-NULL pt directive object on success; NULL otherwise.
+ */
+extern struct pt_directive *pd_alloc(size_t n);
+
+/* Deallocates and clears all fields of @pd.
+ * If @pd is the NULL pointer, nothing happens.
+ */
+extern void pd_free(struct pt_directive *pd);
+
+/* Copies @kind, @name and @payload to the corresponding fields in @pd.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @pd or @name or @payload is the NULL
+ * pointer.
+ */
+extern int pd_set(struct pt_directive *pd, enum pt_directive_kind kind,
+ const char *name, const char *payload);
+
+/* Parses a pt directive from @st and stores it in @pd.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @pd or @st is the NULL pointer.
+ */
+extern int pd_parse(struct pt_directive *pd, struct state *st);
+
+/* Represents a yasm assembled file. */
+struct yasm {
+ /* Filename of the .asm file. */
+ char *pttfile;
+
+ /* Filename of the .lst file. It is the concatenation of
+ * fileroot and ".lst".
+ */
+ char *lstfile;
+
+ /* Filename of the .bin file. It is the concatenation of
+ * fileroot and ".bin".
+ */
+ char *binfile;
+
+ /* Fileroot is the pttfile filename, but with a trailing file
+ * extension removed. It is used to create files based on the
+ * pttfile and is also used to create the .pt and .exp files
+ * during the parsing step.
+ */
+ char *fileroot;
+
+ /* The list of files that are encountered while parsing the
+ * lstfile.
+ */
+ struct file_list *fl;
+
+ /* State of the current assembly file, while parsing the
+ * lstfile.
+ */
+ struct state *st_asm;
+
+ /* Current line number in the lstfile. */
+ int lst_curr_line;
+
+ /* The list of labels found in the lstfile. */
+ struct label *l;
+};
+
+/* Allocates a new yasm container with @pttfile.
+ *
+ * Returns a non-NULL yasm container object on success; NULL otherwise.
+ */
+extern struct yasm *yasm_alloc(const char *pttfile);
+
+/* Deallocates and clears all field of @y.
+ * If @y is the NULL pointer, nothing happens.
+ */
+extern void yasm_free(struct yasm *y);
+
+/* Assembles the pttfile with yasm and parses all labels.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int yasm_parse(struct yasm *y);
+
+/* Looks up @labelname and stores its address in @addr if found.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int yasm_lookup_label(const struct yasm *y, uint64_t *addr,
+ const char *labelname);
+
+/* Looks up the special section label "section_@name_@attribute" and stores
+ * its value in @value if found.
+ *
+ * Valid attributes are:
+ *
+ * - start the section's start address in the binary file
+ * - vstart the section's virtual load address
+ * - length the section's size in bytes
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+extern int yasm_lookup_section_label(const struct yasm *y, const char *name,
+ const char *attribute, uint64_t *value);
+
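+/* A minimal usage sketch (the section name "text" is illustrative):
+ *
+ *     uint64_t vstart, length;
+ *
+ *     errcode = yasm_lookup_section_label(y, "text", "vstart", &vstart);
+ *     if (!errcode)
+ *             errcode = yasm_lookup_section_label(y, "text", "length",
+ *                                                 &length);
+ */
+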
+/* Stores the next pt directive in @pd.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @y or @pd is the NULL pointer.
+ * Returns -err_no_directive if there is no pt directive left.
+ */
+extern int yasm_next_pt_directive(struct yasm *y, struct pt_directive *pd);
+
+/* Calls pd_parse for the current file and line.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_no_directive if the current source line contains no PT
+ * directive.
+ */
+extern int yasm_pd_parse(struct yasm *y, struct pt_directive *pd);
+
+/* Stores the next line in the asm file into @dest. The memory behind
+ * @dest must be large enough to store @destlen bytes.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @y is the NULL pointer or @dest is NULL, but
+ * @destlen is non-zero.
+ */
+extern int yasm_next_line(struct yasm *y, char *dest, size_t destlen);
+
+/* Prints the error message @s together with errstr[@errcode]. File and
+ * line information are printed regarding the current state of @y.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @errcode is not negative.
+ */
+extern int yasm_print_err(const struct yasm *y, const char *s, int errcode);
+
+#endif /* YASM_H */
diff --git a/pttc/src/errcode.c b/pttc/src/errcode.c
new file mode 100644
index 000000000000..1253b0eaa0aa
--- /dev/null
+++ b/pttc/src/errcode.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+
+const char *errstr[] = {
+ "success",
+
+ "cannot open file",
+ "cannot read file",
+ "cannot get file size",
+ "cannot write file",
+ "out of range",
+
+ "label has no address",
+ "yasm directive 'org' is required",
+ "no pt directive",
+ "no such label",
+ "label name is too long",
+ "label name is not unique",
+
+ "failed to find section name",
+ "failed to find value for section attribute",
+ "unknown section attribute",
+
+ "missing ')'",
+ "missing '('",
+
+ "parse error",
+ "integer cannot be parsed",
+ "integer too big",
+ "ipc missing or has invalid value",
+ "ip missing",
+ "no arguments",
+ "trailing tokens",
+ "unknown character",
+ "unknown directive",
+ "missing directive",
+
+ "unexpected sub C-state",
+ "invalid C-state",
+
+ "no open sideband file",
+ "sideband format error",
+ "configuration error",
+
+ "pt library error",
+
+ "run failed",
+
+ "unspecified error",
+
+ "out of memory",
+
+ "internal error",
+
+ "processing stopped",
+
+ "max error code",
+};
diff --git a/pttc/src/file.c b/pttc/src/file.c
new file mode 100644
index 000000000000..a7ce1f3e953b
--- /dev/null
+++ b/pttc/src/file.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "file.h"
+#include "util.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+struct text *text_alloc(const char *s)
+{
+ size_t n, i;
+ char **line;
+ struct text *t;
+
+ t = calloc(1, sizeof(struct text));
+ if (!t)
+ return NULL;
+
+ /* If s is NULL or empty, there is nothing to do. */
+ if (!s || *s == '\0')
+ return t;
+
+ /* beginning of s is the first line. */
+ t->n = 1;
+ t->line = calloc(1, sizeof(*t->line));
+ if (!t->line)
+ goto error;
+
+ t->line[0] = duplicate_str(s);
+ if (!t->line[0])
+ goto error;
+
+ /* Iterate through all chars and turn \r?\n into \0. */
+ n = strlen(t->line[0]);
+ for (i = 0; i < n; i++) {
+ if (t->line[0][i] == '\r') {
+ if (i+1 >= n) {
+ /* the file ends with \r. */
+ t->line[0][i] = '\0';
+ break;
+ }
+ /* terminate the line string if it's a line end. */
+ if (t->line[0][i+1] == '\n')
+ t->line[0][i] = '\0';
+
+ } else if (t->line[0][i] == '\n') {
+ /* set newline character always to \0. */
+ t->line[0][i] = '\0';
+ if (i+1 >= n) {
+ /* the file ends with \n. */
+ break;
+ }
+ /* increase line pointer buffer. */
+ line = realloc(t->line, (t->n+1) * sizeof(*t->line));
+ if (!line)
+ goto error;
+ t->line = line;
+ /* point to the next character after the
+ * newline and increment the number of lines.
+ */
+ t->line[t->n++] = &(t->line[0][i+1]);
+ }
+ }
+
+ return t;
+
+error:
+ text_free(t);
+ return NULL;
+}
+
+void text_free(struct text *t)
+{
+ if (!t)
+ return;
+
+ if (t->line)
+ free(t->line[0]);
+ free(t->line);
+ free(t);
+}
+
+int text_line(const struct text *t, char *dest, size_t destlen, size_t n)
+{
+ if (bug_on(!t))
+ return -err_internal;
+
+ if (bug_on(!dest && destlen))
+ return -err_internal;
+
+ if (n >= t->n)
+ return -err_out_of_range;
+
+ if (!dest)
+ return 0;
+
+ if (!destlen)
+ return -err_internal;
+
+ strncpy(dest, t->line[n], destlen);
+
+ /* Make sure the string is terminated. */
+ dest[destlen-1] = '\0';
+ return 0;
+}
+
+struct file_list *fl_alloc(void)
+{
+ return calloc(1, sizeof(struct file_list));
+}
+
+void fl_free(struct file_list *fl)
+{
+ if (!fl)
+ return;
+
+ fl_free(fl->next);
+ text_free(fl->text);
+ free(fl->filename);
+ free(fl);
+}
+
+/* Appends the @filename to @fl and stores a pointer to the internal
+ * text structure in @t.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @fl or @t is the NULL pointer.
+ * Returns -err_file_stat if @filename could not be found.
+ * Returns -err_file_open if @filename could not be opened.
+ * Returns -err_file_read if the content of @filename could not be fully
+ * read.
+ */
+static int fl_append(struct file_list *fl, struct text **t,
+ const char *filename)
+{
+ int errcode;
+ FILE *f;
+ char *s;
+ long pos;
+ size_t fsize;
+ size_t read;
+
+ if (bug_on(!fl))
+ return -err_internal;
+
+ if (bug_on(!t))
+ return -err_internal;
+
+ if (bug_on(!filename))
+ return -err_internal;
+
+ s = NULL;
+ *t = NULL;
+
+ while (fl->next)
+ fl = fl->next;
+
+ fl->next = fl_alloc();
+ if (!fl->next) {
+ errcode = -err_no_mem;
+ goto error;
+ }
+
+ fl->next->filename = duplicate_str(filename);
+ if (!fl->next->filename) {
+ errcode = -err_no_mem;
+ goto error;
+ }
+
+ errno = 0;
+ f = fopen(filename, "rb");
+ if (!f) {
+ fprintf(stderr, "open %s failed: %s\n",
+ filename, strerror(errno));
+ errcode = -err_file_open;
+ goto error;
+ }
+
+ errcode = fseek(f, 0, SEEK_END);
+ if (errcode) {
+ fprintf(stderr, "%s: failed to seek end: %s\n",
+ filename, strerror(errno));
+ errcode = -err_file_size;
+ goto error_file;
+ }
+
+ pos = ftell(f);
+ if (pos < 0) {
+ fprintf(stderr, "%s: failed to determine file size: %s\n",
+ filename, strerror(errno));
+ errcode = -err_file_size;
+ goto error_file;
+ }
+ fsize = (size_t) pos;
+
+ errcode = fseek(f, 0, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr, "%s: failed to seek begin: %s\n",
+ filename, strerror(errno));
+ errcode = -err_file_size;
+ goto error_file;
+ }
+
+ s = calloc(fsize+1, 1); /* size + 1: space for last null byte. */
+ if (!s) {
+ errcode = -err_no_mem;
+ goto error_file;
+ }
+
+ read = fread(s, 1, fsize, f);
+ fclose(f);
+ if (read != fsize) {
+ fprintf(stderr, "read %s failed\n", filename);
+ errcode = -err_file_read;
+ goto error;
+ }
+
+ *t = text_alloc(s);
+ if (!*t) {
+ errcode = -err_no_mem;
+ goto error;
+ }
+
+ free(s);
+ fl->next->text = *t;
+
+ return 0;
+
+error_file:
+ fclose(f);
+error:
+ /* The file is either already closed or was never opened at this point. */
+ fl_free(fl->next);
+ fl->next = NULL;
+ free(s);
+ text_free(*t);
+ *t = NULL;
+ return errcode;
+}
+
+int fl_getline(struct file_list *fl, char *dest, size_t destlen,
+ const char *filename, size_t n)
+{
+ int errcode;
+ const struct text *t;
+
+ if (bug_on(!fl))
+ return -err_internal;
+
+ errcode = fl_gettext(fl, &t, filename);
+ if (errcode < 0)
+ return errcode;
+
+ return text_line(t, dest, destlen, n);
+}
+
+int fl_gettext(struct file_list *fl, const struct text **t,
+ const char *filename)
+{
+ struct text *tmp;
+ int errcode;
+
+ if (bug_on(!fl))
+ return -err_internal;
+
+ if (bug_on(!t))
+ return -err_internal;
+
+ if (bug_on(!filename))
+ return -err_internal;
+
+ while (fl->next) {
+ fl = fl->next;
+ if (strcmp(fl->filename, filename) == 0) {
+ *t = fl->text;
+ return 0;
+ }
+ }
+ errcode = fl_append(fl, &tmp, filename);
+ if (errcode < 0)
+ return errcode;
+
+ *t = tmp;
+ return 0;
+}
diff --git a/pttc/src/main.c b/pttc/src/main.c
new file mode 100644
index 000000000000..ee3baae43a8e
--- /dev/null
+++ b/pttc/src/main.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pttc.h"
+
+#include "pt_cpu.h"
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Prints this tool's version number and the libipt version number on stdout. */
+static void version(const char *prog)
+{
+ struct pt_version v;
+
+ v = pt_library_version();
+ printf("%s-%d.%d.%d%s / libipt-%" PRIu8 ".%" PRIu8 ".%" PRIu32 "%s\n",
+ prog, PT_VERSION_MAJOR, PT_VERSION_MINOR, PT_VERSION_BUILD,
+ PT_VERSION_EXT, v.major, v.minor, v.build, v.ext);
+}
+
+/* Prints usage information to stdout. */
+static void help(const char *prog)
+{
+ printf("usage: %s [<options>] <pttfile>\n\n"
+ "options:\n"
+ " --help|-h this text.\n"
+ " --version display version information and exit.\n"
+ " --cpu none|auto|f/m[/s] set cpu to the given value and encode according to:\n"
+ " none spec (default)\n"
+ " auto current cpu\n"
+ " f/m[/s] family/model[/stepping]\n"
+ " <pttfile> the annotated yasm input file.\n",
+ prog);
+}
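+
+/* Example invocation (illustrative; file name and cpu values are made up):
+ *
+ *     pttc --cpu 6/62 test.ptt
+ *
+ * This would encode the trace described in test.ptt for a cpu with
+ * family 6 and model 62, per the option handling in main() below.
+ */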
+
+int main(int argc, char *argv[])
+{
+ struct pttc_options options;
+ const char *prog;
+ int errcode, i;
+
+ prog = argv[0];
+ memset(&options, 0, sizeof(options));
+
+ for (i = 1; i < argc;) {
+ const char *arg;
+
+ arg = argv[i++];
+
+ if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) {
+ help(prog);
+ return 0;
+ }
+ if (strcmp(arg, "--version") == 0) {
+ version(prog);
+ return 0;
+ }
+ if (strcmp(arg, "--cpu") == 0) {
+ arg = argv[i++];
+
+ if (strcmp(arg, "auto") == 0) {
+ errcode = pt_cpu_read(&options.cpu);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: error reading cpu: %s.\n",
+ prog,
+ pt_errstr(pt_errcode(errcode)));
+ return 1;
+ }
+ continue;
+ }
+
+ if (strcmp(arg, "none") == 0) {
+ memset(&options.cpu, 0, sizeof(options.cpu));
+ continue;
+ }
+
+ errcode = pt_cpu_parse(&options.cpu, arg);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: cpu must be specified as f/m[/s].\n",
+ prog);
+ return 1;
+ }
+ continue;
+ }
+
+ if (arg[0] == '-') {
+ fprintf(stderr, "%s: unrecognized option '%s'.\n",
+ prog, arg);
+ return 1;
+ }
+
+ if (options.pttfile) {
+ fprintf(stderr,
+ "%s: only one pttfile can be specified.\n",
+ prog);
+ return 1;
+ }
+ options.pttfile = arg;
+ }
+
+ if (!options.pttfile) {
+ fprintf(stderr, "%s: no pttfile specified.\n", prog);
+ fprintf(stderr, "Try '%s -h' for more information.\n", prog);
+ return 1;
+ }
+
+ return pttc_main(&options);
+}
diff --git a/pttc/src/parse.c b/pttc/src/parse.c
new file mode 100644
index 000000000000..339d159b466b
--- /dev/null
+++ b/pttc/src/parse.c
@@ -0,0 +1,2779 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "parse.h"
+#include "util.h"
+#include "pt_compiler.h"
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+# define snprintf _snprintf_c
+#endif
+
+
+static const char *pt_suffix = ".pt";
+static const char *exp_suffix = ".exp";
+
+#if defined(FEATURE_SIDEBAND)
+static const char *sb_suffix = ".sb";
+#endif
+
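+/* The buffer size used for directive parsing; passed to pd_alloc() in
+ * p_alloc() below.
+ */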
+enum {
+ pd_len = 1024
+};
+
+#if defined(FEATURE_SIDEBAND)
+
+static void sb_rename_file(struct sb_file *sb)
+{
+ char filename[FILENAME_MAX];
+
+ /* We encode the configuration in the sideband filename. */
+ switch (sb->format) {
+ case sbf_raw:
+ strncpy(filename, sb->name, sizeof(filename));
+
+ /* Make sure @filename is terminated. */
+ filename[sizeof(filename) - 1] = 0;
+ break;
+
+#if defined(FEATURE_PEVENT)
+ case sbf_pevent: {
+ const struct pev_config *config;
+ size_t base_len, ext_len, suffix_len, total_len;
+ int errcode, printed;
+ char extension[256];
+
+ config = &sb->variant.pevent.config;
+
+ printed = snprintf(extension, sizeof(extension),
+ ",sample-type=0x%" PRIx64 ",time-zero=0x%"
+ PRIx64 ",time-shift=0x%u" ",time-mult=0x%u",
+ config->sample_type, config->time_zero,
+ config->time_shift, config->time_mult);
+ if (printed < 0) {
+ fprintf(stderr, "error renaming %s.\n", sb->name);
+ return;
+ }
+
+ ext_len = (size_t) printed;
+ suffix_len = strnlen(sb_suffix, sizeof(filename));
+
+ base_len = strnlen(sb->name, sizeof(filename));
+ base_len -= suffix_len;
+
+ total_len = base_len + ext_len + suffix_len + 1;
+ if (sizeof(filename) <= total_len) {
+ fprintf(stderr, "warning: %s could not be renamed.\n",
+ sb->name);
+ return;
+ }
+
+ strncpy(filename, sb->name, base_len);
+
+ printed = snprintf(filename + base_len,
+ sizeof(filename) - base_len, "%s%s",
+ extension, sb_suffix);
+ if (printed < 0) {
+ fprintf(stderr, "error renaming %s.\n", sb->name);
+ return;
+ }
+
+ errno = 0;
+ errcode = rename(sb->name, filename);
+ if (errcode < 0)
+ fprintf(stderr, "error renaming %s: %s.\n",
+ sb->name, strerror(errno));
+ }
+ break;
+#endif /* defined(FEATURE_PEVENT) */
+ }
+
+ /* Print the name of the sideband file for test.bash. */
+ printf("%s\n", filename);
+}
+
+#endif /* defined(FEATURE_SIDEBAND) */
+
+/* Closes all open files and frees @p together with all resources it
+ * holds.
+ */
+static void p_free(struct parser *p)
+{
+ if (!p)
+ return;
+
+ yasm_free(p->y);
+ pd_free(p->pd);
+ l_free(p->pt_labels);
+ free(p->ptfilename);
+
+#if defined(FEATURE_SIDEBAND)
+ {
+ struct sb_filelist *sb;
+
+ sb = p->sbfiles;
+ while (sb) {
+ struct sb_filelist *trash;
+
+ trash = sb;
+ sb = sb->next;
+
+ fclose(trash->sbfile.file);
+
+ sb_rename_file(&trash->sbfile);
+
+ free(trash->sbfile.name);
+ free(trash);
+ }
+ }
+#endif /* defined(FEATURE_SIDEBAND) */
+
+ free(p);
+}
+
+/* Allocates and initializes a new parser for @pttfile and @conf.
+ *
+ * Returns a pointer to the new parser on success; NULL if @pttfile or
+ * @conf is the NULL pointer or if not enough memory is available.
+ */
+static struct parser *p_alloc(const char *pttfile, const struct pt_config *conf)
+{
+ size_t n;
+ struct parser *p;
+
+ if (!conf)
+ return NULL;
+
+ if (!pttfile)
+ return NULL;
+
+ p = calloc(1, sizeof(*p));
+ if (!p)
+ return NULL;
+
+ p->y = yasm_alloc(pttfile);
+ if (!p->y)
+ goto error;
+
+ n = strlen(p->y->fileroot) + 1;
+
+ p->ptfilename = malloc(n+strlen(pt_suffix));
+ if (!p->ptfilename)
+ goto error;
+
+ strcpy(p->ptfilename, p->y->fileroot);
+ strcat(p->ptfilename, pt_suffix);
+
+ p->pd = pd_alloc(pd_len);
+ if (!p->pd)
+ goto error;
+
+ p->pt_labels = l_alloc();
+ if (!p->pt_labels)
+ goto error;
+
+ p->conf = conf;
+
+#if defined(FEATURE_SIDEBAND)
+ p->sbfiles = NULL;
+ p->current_sbfile = NULL;
+#endif
+
+ return p;
+
+error:
+ p_free(p);
+ return NULL;
+}
+
+/* Generates an .exp filename following the scheme:
+ * <fileroot>[-<extra>][-cpu_<f>_<m>[_<s>]].exp
+ */
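+/* For illustration (hypothetical names): a fileroot of "ptt-test" with
+ * extra "part1" and a cpu of 6/62/4 would yield
+ * "ptt-test-part1-cpu_6_62_4.exp".
+ */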
+static char *expfilename(struct parser *p, const char *extra)
+{
+ char *filename;
+ /* reserve enough space to hold the string
+ * "-cpu_fffff_mmm_sss" + 1 for the trailing null character.
+ */
+ char cpu_suffix[19];
+ size_t n;
+
+ if (!extra)
+ extra = "";
+ *cpu_suffix = '\0';
+
+ /* determine length of resulting filename, which looks like:
+ * <fileroot>[-<extra>][-cpu_<f>_<m>_<s>].exp
+ */
+ n = strlen(p->y->fileroot);
+
+ if (*extra != '\0')
+		/* the extra string is preceded by a '-'. */
+ n += 1 + strlen(extra);
+
+ if (p->conf->cpu.vendor != pcv_unknown) {
+ struct pt_cpu cpu;
+
+ cpu = p->conf->cpu;
+ if (cpu.stepping)
+ n += sprintf(cpu_suffix,
+ "-cpu_%" PRIu16 "_%" PRIu8 "_%" PRIu8 "",
+ cpu.family, cpu.model, cpu.stepping);
+ else
+ n += sprintf(cpu_suffix,
+ "-cpu_%" PRIu16 "_%" PRIu8 "", cpu.family,
+ cpu.model);
+ }
+
+ n += strlen(exp_suffix);
+
+ /* trailing null character. */
+ n += 1;
+
+ filename = malloc(n);
+ if (!filename)
+ return NULL;
+
+ strcpy(filename, p->y->fileroot);
+ if (*extra != '\0') {
+ strcat(filename, "-");
+ strcat(filename, extra);
+ }
+ strcat(filename, cpu_suffix);
+ strcat(filename, exp_suffix);
+
+ return filename;
+}
+
+/* Returns true if @c is part of a label; false otherwise. */
+static int islabelchar(int c)
+{
+ if (isalnum(c))
+ return 1;
+
+ switch (c) {
+ case '_':
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Generates the content of the .exp file by printing all lines with
+ * everything up to and including the first comment semicolon removed.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @p is the NULL pointer.
+ * Returns -err_file_write if the .exp file could not be fully written.
+ */
+static int p_gen_expfile(struct parser *p)
+{
+ int errcode;
+ enum { slen = 1024 };
+ char s[slen];
+ struct pt_directive *pd;
+ char *filename;
+ FILE *f;
+
+ if (bug_on(!p))
+ return -err_internal;
+
+ pd = p->pd;
+
+ /* the directive in the current line must be the .exp directive. */
+ errcode = yasm_pd_parse(p->y, pd);
+ if (bug_on(errcode < 0))
+ return -err_internal;
+
+ if (bug_on(strcmp(pd->name, ".exp") != 0))
+ return -err_internal;
+
+ filename = expfilename(p, pd->payload);
+ if (!filename)
+ return -err_no_mem;
+ f = fopen(filename, "w");
+ if (!f) {
+ free(filename);
+ return -err_file_open;
+ }
+
+ for (;;) {
+ int i;
+ char *line, *comment;
+
+ errcode = yasm_next_line(p->y, s, slen);
+ if (errcode < 0)
+ break;
+
+ errcode = yasm_pd_parse(p->y, pd);
+ if (errcode < 0 && errcode != -err_no_directive)
+ break;
+
+ if (errcode == 0 && strcmp(pd->name, ".exp") == 0) {
+ fclose(f);
+ printf("%s\n", filename);
+ free(filename);
+ filename = expfilename(p, pd->payload);
+ if (!filename)
+ return -err_no_mem;
+ f = fopen(filename, "w");
+ if (!f) {
+ free(filename);
+ return -err_file_open;
+ }
+ continue;
+ }
+
+ line = strchr(s, ';');
+ if (!line)
+ continue;
+
+ line += 1;
+
+ comment = strchr(line, '#');
+ if (comment)
+ *comment = '\0';
+
+ /* remove trailing spaces. */
+ for (i = (int) strlen(line)-1; i >= 0 && isspace(line[i]); i--)
+ line[i] = '\0';
+
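+		/* Substitute label references in the remainder of the
+		 * line. An illustrative summary of the syntax handled
+		 * below: %<label> prints the label's address in hex,
+		 * %0<label> zero-pads it to 16 digits, %?<label>
+		 * prints masked-out upper bytes as "??", and a .<n>
+		 * suffix after the label masks the address to its
+		 * lowest <n> bytes (pt directive labels are printed
+		 * without masking).
+		 */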
+ for (;;) {
+ char *tmp, label[256];
+ uint64_t addr;
+ int zero_padding, qmark_padding, qmark_size, status;
+
+ zero_padding = 0;
+ qmark_padding = 0;
+ qmark_size = 0;
+ status = 0;
+
+ /* find the label character in the string.
+ * if there is no label character, we just print
+ * the rest of the line and end.
+ */
+ tmp = strchr(line, '%');
+ if (!tmp) {
+ if (fprintf(f, "%s", line) < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+ break;
+ }
+
+ /* make the label character a null byte and
+ * print the first portion, which does not
+ * belong to the label into the file.
+ */
+ *tmp = '\0';
+ if (fprintf(f, "%s", line) < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+
+ /* test if there is a valid label name after the %. */
+ line = tmp+1;
+ if (*line == '\0' || isspace(*line)) {
+ errcode = -err_no_label;
+ goto error;
+ }
+
+ /* check if zero padding is requested. */
+ if (*line == '0') {
+ zero_padding = 1;
+ line += 1;
+ }
+			/* check if '?' padding is requested. */
+ else if (*line == '?') {
+ qmark_padding = 1;
+ zero_padding = 1;
+ qmark_size = 0;
+ line += 1;
+ }
+
+			/* advance i to the first character that is not a
+			 * label character. everything from line[0] to
+			 * line[i-1] belongs to the label name.
+			 */
+ for (i = 0; islabelchar(line[i]); i++)
+ ;
+
+ if (i > 255) {
+ errcode = -err_label_name;
+ goto error;
+ }
+ strncpy(label, line, i);
+ label[i] = '\0';
+
+ /* advance to next character. */
+ line = &line[i];
+
+ /* lookup the label name and print it to the
+ * output file.
+ */
+ errcode = yasm_lookup_label(p->y, &addr, label);
+ if (errcode < 0) {
+ errcode = l_lookup(p->pt_labels, &addr, label);
+ if (errcode < 0)
+ goto error;
+
+ if (zero_padding)
+ status = fprintf(f, "%016" PRIx64, addr);
+ else
+ status = fprintf(f, "%" PRIx64, addr);
+
+ if (status < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+
+ continue;
+ }
+
+ /* check if masking is requested. */
+ if (*line == '.') {
+ char *endptr;
+ unsigned long int n;
+
+ line += 1;
+
+ n = strtoul(line, &endptr, 0);
+				/* check that strtoul made progress and
+				 * stopped on a space, punctuation, or the
+				 * terminating null byte. otherwise the
+				 * integer could not be parsed.
+				 */
+ if (line == endptr ||
+ (*endptr != '\0' && !isspace(*endptr)
+ && !ispunct(*endptr))) {
+ errcode = -err_parse_int;
+ goto error;
+ }
+ if (8 < n) {
+ errcode = -err_parse_int;
+ goto error;
+ }
+
+ addr &= (1ull << (n << 3)) - 1ull;
+ line = endptr;
+
+ qmark_size = (int) (8 - n);
+ }
+
+ if (qmark_padding) {
+ for (i = 0; i < qmark_size; ++i) {
+ status = fprintf(f, "??");
+ if (status < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+ }
+
+ for (; i < 8; ++i) {
+ uint8_t byte;
+
+ byte = (uint8_t)(addr >> ((7 - i) * 8));
+
+ status = fprintf(f, "%02" PRIx8, byte);
+ if (status < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+ }
+ } else if (zero_padding)
+ status = fprintf(f, "%016" PRIx64, addr);
+ else
+ status = fprintf(f, "%" PRIx64, addr);
+
+ if (status < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+
+ }
+
+ if (fprintf(f, "\n") < 0) {
+ errcode = -err_file_write;
+ goto error;
+ }
+ }
+
+error:
+
+ fclose(f);
+ if (errcode < 0 && errcode != -err_out_of_range) {
+ fprintf(stderr, "fatal: %s could not be created:\n", filename);
+ yasm_print_err(p->y, "", errcode);
+ remove(filename);
+ } else
+ printf("%s\n", filename);
+ free(filename);
+
+ /* If there are no lines left, we are done. */
+ if (errcode == -err_out_of_range)
+ return 0;
+
+ return errcode;
+}
+
+static void p_close_files(struct parser *p)
+{
+ if (p->ptfile) {
+ fclose(p->ptfile);
+ p->ptfile = NULL;
+ }
+}
+
+static int p_open_files(struct parser *p)
+{
+ p->ptfile = fopen(p->ptfilename, "wb");
+ if (!p->ptfile) {
+ fprintf(stderr, "open %s failed\n", p->ptfilename);
+ goto error;
+ }
+ return 0;
+
+error:
+ p_close_files(p);
+ return -err_file_open;
+}
+
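+/* Parses an mwait directive payload of the form "<hints>, <ext>", where
+ * both values are unsigned 32-bit integers, into @hints and @ext.
+ */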
+static int parse_mwait(uint32_t *hints, uint32_t *ext, char *payload)
+{
+ char *endptr;
+ unsigned long i;
+
+ if (bug_on(!hints || !ext))
+ return -err_internal;
+
+ payload = strtok(payload, ",");
+ if (!payload || *payload == '\0')
+ return -err_parse_no_args;
+
+ i = strtoul(payload, &endptr, 0);
+ if (payload == endptr || *endptr != '\0')
+ return -err_parse_int;
+
+ if (UINT32_MAX < i)
+ return -err_parse_int_too_big;
+
+ *hints = (uint32_t)i;
+
+ payload = strtok(NULL, " ,");
+ if (!payload)
+ return -err_parse_no_args;
+
+ i = strtoul(payload, &endptr, 0);
+ if (payload == endptr || *endptr != '\0')
+ return -err_parse_int;
+
+ if (UINT32_MAX < i)
+ return -err_parse_int_too_big;
+
+ *ext = (uint32_t)i;
+
+ /* no more tokens left. */
+ payload = strtok(NULL, " ");
+ if (payload)
+ return -err_parse_trailing_tokens;
+
+ return 0;
+}
+
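+/* Parses a C-state argument of the form "c<maj>[.<min>]", e.g. "c1" or
+ * "c6.2", into @state and @sub_state. The parsed values are stored
+ * biased by one (c1 becomes 0).
+ */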
+static int parse_c_state(uint8_t *state, uint8_t *sub_state, const char *input)
+{
+ unsigned int maj, min;
+ int matches;
+
+ if (!input)
+ return -err_parse_no_args;
+
+ maj = 0;
+ min = 0;
+ matches = sscanf(input, " c%u.%u", &maj, &min);
+ switch (matches) {
+ case 0:
+ return -err_parse_no_args;
+
+ case 2:
+ if (!sub_state)
+ return -err_parse_c_state_sub;
+
+ if (0xf <= min)
+ return -err_parse_c_state_invalid;
+
+ fallthrough;
+ case 1:
+ if (!state)
+ return -err_internal;
+
+ if (0xf <= maj)
+ return -err_parse_c_state_invalid;
+
+ break;
+ }
+
+ *state = (uint8_t) ((maj - 1) & 0xf);
+ if (sub_state)
+ *sub_state = (uint8_t) ((min - 1) & 0xf);
+
+ return 0;
+}
+
+/* Processes the current directive.
+ * If the encoder returns an error, a message including current file and
+ * line number together with the pt error string is printed on stderr.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @p or @e is the NULL pointer.
+ * Returns -err_pt_lib if the pt encoder returned an error.
+ * Returns -err_parse if a general parsing error was encountered.
+ * Returns -err_parse_unknown_directive if there was an unknown pt directive.
+ */
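+/* The directive names handled below correspond to the Intel PT packet
+ * types in enum pt_packet_type (ppt_*); their payloads are parsed into
+ * a pt_packet and handed to the encoder.
+ */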
+static int p_process_pt(struct parser *p, struct pt_encoder *e)
+{
+ struct pt_directive *pd;
+ struct pt_packet packet;
+ char *directive, *payload;
+ int bytes_written, errcode;
+
+ if (bug_on(!p))
+ return -err_internal;
+
+ if (bug_on(!e))
+ return -err_internal;
+
+ pd = p->pd;
+ if (!pd)
+ return -err_internal;
+
+ directive = pd->name;
+ payload = pd->payload;
+
+ if (strcmp(directive, "psb") == 0) {
+ errcode = parse_empty(payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "psb: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_psb;
+ } else if (strcmp(directive, "psbend") == 0) {
+ errcode = parse_empty(payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "psbend: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_psbend;
+ } else if (strcmp(directive, "pad") == 0) {
+ errcode = parse_empty(payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pad: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_pad;
+ } else if (strcmp(directive, "ovf") == 0) {
+ errcode = parse_empty(payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "ovf: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_ovf;
+ } else if (strcmp(directive, "stop") == 0) {
+ errcode = parse_empty(payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "stop: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_stop;
+ } else if (strcmp(directive, "tnt") == 0) {
+ errcode = parse_tnt(&packet.payload.tnt.payload,
+ &packet.payload.tnt.bit_size, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tnt: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_tnt_8;
+ } else if (strcmp(directive, "tnt64") == 0) {
+ errcode = parse_tnt(&packet.payload.tnt.payload,
+ &packet.payload.tnt.bit_size, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tnt64: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_tnt_64;
+ } else if (strcmp(directive, "tip") == 0) {
+ errcode = parse_ip(p, &packet.payload.ip.ip,
+ &packet.payload.ip.ipc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tip: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_tip;
+ } else if (strcmp(directive, "tip.pge") == 0) {
+ errcode = parse_ip(p, &packet.payload.ip.ip,
+ &packet.payload.ip.ipc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tip.pge: parsing failed",
+ errcode);
+ return errcode;
+ }
+ packet.type = ppt_tip_pge;
+ } else if (strcmp(directive, "tip.pgd") == 0) {
+ errcode = parse_ip(p, &packet.payload.ip.ip,
+ &packet.payload.ip.ipc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tip.pgd: parsing failed",
+ errcode);
+ return errcode;
+ }
+ packet.type = ppt_tip_pgd;
+ } else if (strcmp(directive, "fup") == 0) {
+ errcode = parse_ip(p, &packet.payload.ip.ip,
+ &packet.payload.ip.ipc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "fup: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_fup;
+ } else if (strcmp(directive, "mode.exec") == 0) {
+ if (strcmp(payload, "16bit") == 0) {
+ packet.payload.mode.bits.exec.csl = 0;
+ packet.payload.mode.bits.exec.csd = 0;
+ } else if (strcmp(payload, "64bit") == 0) {
+ packet.payload.mode.bits.exec.csl = 1;
+ packet.payload.mode.bits.exec.csd = 0;
+ } else if (strcmp(payload, "32bit") == 0) {
+ packet.payload.mode.bits.exec.csl = 0;
+ packet.payload.mode.bits.exec.csd = 1;
+ } else {
+ errcode = yasm_print_err(p->y,
+ "mode.exec: argument must be one of \"16bit\", \"64bit\" or \"32bit\"",
+ -err_parse);
+ return errcode;
+ }
+ packet.payload.mode.leaf = pt_mol_exec;
+ packet.type = ppt_mode;
+ } else if (strcmp(directive, "mode.tsx") == 0) {
+ if (strcmp(payload, "begin") == 0) {
+ packet.payload.mode.bits.tsx.intx = 1;
+ packet.payload.mode.bits.tsx.abrt = 0;
+ } else if (strcmp(payload, "abort") == 0) {
+ packet.payload.mode.bits.tsx.intx = 0;
+ packet.payload.mode.bits.tsx.abrt = 1;
+ } else if (strcmp(payload, "commit") == 0) {
+ packet.payload.mode.bits.tsx.intx = 0;
+ packet.payload.mode.bits.tsx.abrt = 0;
+ } else {
+ errcode = yasm_print_err(p->y,
+ "mode.tsx: argument must be one of \"begin\", \"abort\" or \"commit\"",
+ -err_parse);
+ return errcode;
+ }
+ packet.payload.mode.leaf = pt_mol_tsx;
+ packet.type = ppt_mode;
+ } else if (strcmp(directive, "pip") == 0) {
+ const char *modifier;
+
+ errcode = parse_uint64(&packet.payload.pip.cr3, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pip: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_pip;
+ packet.payload.pip.nr = 0;
+
+ modifier = strtok(NULL, " ,");
+ if (modifier) {
+ if (strcmp(modifier, "nr") == 0)
+ packet.payload.pip.nr = 1;
+ else {
+ yasm_print_err(p->y, "pip: parsing failed",
+ -err_parse_trailing_tokens);
+				return -err_parse_trailing_tokens;
+ }
+ }
+ } else if (strcmp(directive, "tsc") == 0) {
+ errcode = parse_uint64(&packet.payload.tsc.tsc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tsc: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_tsc;
+ } else if (strcmp(directive, "cbr") == 0) {
+ errcode = parse_uint8(&packet.payload.cbr.ratio, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "cbr: parsing cbr failed",
+ errcode);
+ return errcode;
+ }
+ packet.type = ppt_cbr;
+ } else if (strcmp(directive, "tma") == 0) {
+ errcode = parse_tma(&packet.payload.tma.ctc,
+ &packet.payload.tma.fc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "tma: parsing tma failed",
+ errcode);
+ return errcode;
+ }
+ packet.type = ppt_tma;
+ } else if (strcmp(directive, "mtc") == 0) {
+ errcode = parse_uint8(&packet.payload.mtc.ctc, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "mtc: parsing mtc failed",
+ errcode);
+ return errcode;
+ }
+ packet.type = ppt_mtc;
+ } else if (strcmp(directive, "cyc") == 0) {
+ errcode = parse_uint64(&packet.payload.cyc.value, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "cyc: parsing cyc failed",
+ errcode);
+ return errcode;
+ }
+ packet.type = ppt_cyc;
+ } else if (strcmp(directive, "vmcs") == 0) {
+ errcode = parse_uint64(&packet.payload.vmcs.base, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "vmcs: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_vmcs;
+ } else if (strcmp(directive, "mnt") == 0) {
+ errcode = parse_uint64(&packet.payload.mnt.payload, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "mnt: parsing failed", errcode);
+ return errcode;
+ }
+ packet.type = ppt_mnt;
+ } else if (strcmp(directive, "exstop") == 0) {
+ packet.type = ppt_exstop;
+ memset(&packet.payload.exstop, 0,
+ sizeof(packet.payload.exstop));
+
+ if (strcmp(payload, "ip") == 0)
+ packet.payload.exstop.ip = 1;
+ else if (*payload) {
+ yasm_print_err(p->y, "exstop: parsing failed",
+ -err_parse_trailing_tokens);
+ return -err_parse_trailing_tokens;
+ }
+ } else if (strcmp(directive, "mwait") == 0) {
+ errcode = parse_mwait(&packet.payload.mwait.hints,
+ &packet.payload.mwait.ext, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "mwait: parsing failed", errcode);
+ return errcode;
+ }
+
+ packet.type = ppt_mwait;
+ } else if (strcmp(directive, "pwre") == 0) {
+ char *token;
+
+ packet.type = ppt_pwre;
+ memset(&packet.payload.pwre, 0, sizeof(packet.payload.pwre));
+
+ token = strtok(payload, " , ");
+ errcode = parse_c_state(&packet.payload.pwre.state,
+ &packet.payload.pwre.sub_state, token);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pwre: bad C-state", errcode);
+ return errcode;
+ }
+
+ token = strtok(NULL, " ,");
+ if (token) {
+ if (strcmp(token, "hw") == 0)
+ packet.payload.pwre.hw = 1;
+ else {
+ yasm_print_err(p->y, "pwre: parsing failed",
+ -err_parse_trailing_tokens);
+ return -err_parse_trailing_tokens;
+ }
+ }
+ } else if (strcmp(directive, "pwrx") == 0) {
+ char *token;
+
+ packet.type = ppt_pwrx;
+ memset(&packet.payload.pwrx, 0, sizeof(packet.payload.pwrx));
+
+ token = strtok(payload, ":");
+ if (!token) {
+ yasm_print_err(p->y, "pwrx: parsing failed",
+ -err_parse_no_args);
+ return -err_parse_no_args;
+ }
+
+ if (strcmp(token, "int") == 0)
+ packet.payload.pwrx.interrupt = 1;
+ else if (strcmp(token, "st") == 0)
+ packet.payload.pwrx.store = 1;
+ else if (strcmp(token, "hw") == 0)
+ packet.payload.pwrx.autonomous = 1;
+ else {
+ yasm_print_err(p->y, "pwrx: bad wake reason",
+ -err_parse);
+ return -err_parse;
+ }
+
+ token = strtok(NULL, " ,");
+ errcode = parse_c_state(&packet.payload.pwrx.last, NULL, token);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pwrx: bad last C-state", errcode);
+ return errcode;
+ }
+
+ token = strtok(NULL, " ,");
+ errcode = parse_c_state(&packet.payload.pwrx.deepest, NULL,
+ token);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pwrx: bad deepest C-state",
+ errcode);
+ return errcode;
+ }
+ } else if (strcmp(directive, "ptw") == 0) {
+ char *token;
+
+ packet.type = ppt_ptw;
+ memset(&packet.payload.ptw, 0, sizeof(packet.payload.ptw));
+
+ token = strtok(payload, ":");
+ if (!token) {
+ yasm_print_err(p->y, "ptw: parsing failed",
+ -err_parse_no_args);
+ return -err_parse_no_args;
+ }
+
+ errcode = str_to_uint8(token, &packet.payload.ptw.plc, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "ptw: bad payload size", errcode);
+ return errcode;
+ }
+
+ token = strtok(NULL, ", ");
+ if (!token) {
+ yasm_print_err(p->y, "ptw: no payload",
+ -err_parse_no_args);
+ return -err_parse_no_args;
+ }
+
+ errcode = str_to_uint64(token, &packet.payload.ptw.payload, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "ptw: bad payload", errcode);
+ return errcode;
+ }
+
+ token = strtok(NULL, " ");
+ if (token) {
+ if (strcmp(token, "ip") != 0) {
+ yasm_print_err(p->y, "ptw: parse error",
+ -err_parse_trailing_tokens);
+ return -err_parse_trailing_tokens;
+ }
+
+ packet.payload.ptw.ip = 1;
+ }
+ } else {
+ errcode = yasm_print_err(p->y, "invalid syntax",
+ -err_parse_unknown_directive);
+ return errcode;
+ }
+
+ bytes_written = pt_enc_next(e, &packet);
+ if (bytes_written < 0) {
+ char msg[128];
+
+ snprintf(msg, sizeof(msg),
+ "encoder error in directive %s (status %s)", directive,
+ pt_errstr(pt_errcode(bytes_written)));
+
+ yasm_print_err(p->y, msg, -err_pt_lib);
+ } else
+ p->pt_bytes_written += bytes_written;
+
+ return bytes_written;
+}
+
+#if defined(FEATURE_SIDEBAND)
+
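+/* Opens (or reuses) a sideband file for the current @sb directive.
+ *
+ * The file is named <fileroot>-[<src>-]<fmt>-<prio>.sb; it is added to
+ * the parser's list of sideband files and made the current sideband
+ * file.
+ */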
+static int sb_open(struct parser *p, const char *fmt, const char *src,
+ const char *prio)
+{
+ struct sb_filelist *sbfiles;
+ const char *root;
+ char name[FILENAME_MAX];
+ FILE *file;
+
+ if (bug_on(!p) || bug_on(!p->y) || bug_on(!prio))
+ return -err_internal;
+
+ root = p->y->fileroot;
+ if (!root) {
+ yasm_print_err(p->y, "open - name root", -err_internal);
+ return -err_internal;
+ }
+
+ if (src && *src)
+ snprintf(name, sizeof(name), "%s-%s-%s-%s%s", root, src, fmt,
+ prio, sb_suffix);
+ else
+ snprintf(name, sizeof(name), "%s-%s-%s%s", root, fmt, prio,
+ sb_suffix);
+
+ for (sbfiles = p->sbfiles; sbfiles; sbfiles = sbfiles->next) {
+ if (strncmp(sbfiles->sbfile.name, name, sizeof(name)) == 0)
+ break;
+ }
+
+ if (!sbfiles) {
+ file = fopen(name, "w");
+ if (!file) {
+ yasm_print_err(p->y, name, -err_file_open);
+ return -err_file_open;
+ }
+
+ sbfiles = malloc(sizeof(*sbfiles));
+ if (!sbfiles) {
+ yasm_print_err(p->y, "open", -err_no_mem);
+ fclose(file);
+ return -err_no_mem;
+ }
+
+ memset(&sbfiles->sbfile, 0, sizeof(sbfiles->sbfile));
+
+ sbfiles->sbfile.name = duplicate_str(name);
+ if (!sbfiles->sbfile.name) {
+ yasm_print_err(p->y, "open", -err_no_mem);
+ fclose(file);
+ free(sbfiles);
+ return -err_no_mem;
+ }
+
+ sbfiles->sbfile.file = file;
+ sbfiles->sbfile.format = sbf_raw;
+
+ sbfiles->next = p->sbfiles;
+ p->sbfiles = sbfiles;
+ }
+
+ p->current_sbfile = &sbfiles->sbfile;
+ return 0;
+}
+
+static struct sb_file *p_get_current_sbfile(struct parser *p)
+{
+ struct sb_file *sb;
+
+ if (bug_on(!p))
+ return NULL;
+
+ sb = p->current_sbfile;
+ if (!sb) {
+ yasm_print_err(p->y, "no sideband file", -err_sb_missing);
+ return NULL;
+ }
+
+ if (bug_on(!sb->file)) {
+ yasm_print_err(p->y, "corrupt sideband file", -err_internal);
+ return NULL;
+ }
+
+ return sb;
+}
+
+static int sb_set_format(struct parser *p, struct sb_file *sb,
+ enum sb_format format)
+{
+ if (bug_on(!p))
+ return -err_internal;
+
+ if (!sb)
+ return -err_sb_missing;
+
+ switch (format) {
+ case sbf_raw:
+ /* Raw sideband directives are allowed for all formats. */
+ return 0;
+
+#if defined(FEATURE_PEVENT)
+ case sbf_pevent:
+ switch (sb->format) {
+ case sbf_pevent:
+ return 0;
+
+ case sbf_raw:
+ sb->format = sbf_pevent;
+
+ memset(&sb->variant.pevent, 0,
+ sizeof(sb->variant.pevent));
+ sb->variant.pevent.config.size =
+ sizeof(sb->variant.pevent.config);
+ sb->variant.pevent.config.time_shift = 0;
+ sb->variant.pevent.config.time_mult = 1;
+ sb->variant.pevent.config.time_zero = 0ull;
+ return 0;
+
+ default:
+ yasm_print_err(p->y, "mixing sideband formats",
+ -err_sb_mix);
+ return -err_sb_mix;
+ }
+#endif /* defined(FEATURE_PEVENT) */
+ }
+
+ yasm_print_err(p->y, "unknown sideband format", -err_internal);
+ return -err_internal;
+}
+
+static int sb_raw(struct parser *p, const void *buffer, size_t size)
+{
+ struct sb_file *sb;
+ size_t written;
+ int errcode;
+
+ if (bug_on(!p))
+ return -err_internal;
+
+ sb = p_get_current_sbfile(p);
+ if (!sb)
+ return -err_sb_missing;
+
+ errcode = sb_set_format(p, sb, sbf_raw);
+ if (errcode < 0)
+ return errcode;
+
+ written = fwrite(buffer, size, 1, sb->file);
+ if (written != 1) {
+ yasm_print_err(p->y, "write failed", -err_file_write);
+ return -err_file_write;
+ }
+
+ sb->bytes_written += (int) size;
+ return 0;
+}
+
+static int sb_raw_8(struct parser *p, uint8_t value)
+{
+ return sb_raw(p, &value, sizeof(value));
+}
+
+static int sb_raw_16(struct parser *p, uint16_t value)
+{
+ return sb_raw(p, &value, sizeof(value));
+}
+
+static int sb_raw_32(struct parser *p, uint32_t value)
+{
+ return sb_raw(p, &value, sizeof(value));
+}
+
+static int sb_raw_64(struct parser *p, uint64_t value)
+{
+ return sb_raw(p, &value, sizeof(value));
+}
+
+#if defined(FEATURE_PEVENT)
+
+/* A buffer to hold sample values to which a pev_event can point. */
+
+struct pev_sample_buffer {
+ uint32_t pid;
+ uint32_t tid;
+ uint64_t time;
+ uint64_t id;
+ uint64_t stream_id;
+ uint32_t cpu;
+ uint64_t identifier;
+};
+
+static int pevent_sample_type(struct parser *p, uint64_t sample_type)
+{
+ struct sb_file *sb;
+ int errcode;
+
+ sb = p_get_current_sbfile(p);
+ if (!sb)
+ return -err_sb_missing;
+
+ errcode = sb_set_format(p, sb, sbf_pevent);
+ if (errcode < 0)
+ return errcode;
+
+ if (sb->variant.pevent.is_final) {
+ yasm_print_err(p->y,
+ "the sideband configuration can no longer be "
+ "modified", -err_sb_final);
+ return -err_sb_final;
+ }
+
+ sb->variant.pevent.config.sample_type = sample_type;
+ return 0;
+}
+
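+/* Parses the samples in @payload in the order given by
+ * @config->sample_type: pid/tid, time (as tsc), id, cpu, stream id,
+ * identifier. Parsed values are stored in @samples and referenced from
+ * @event.
+ */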
+static int pevent_process_samples(struct pev_event *event,
+ struct pev_sample_buffer *samples,
+ struct parser *p,
+ const struct pev_config *config,
+ char *payload)
+{
+ char *token;
+
+ if (bug_on(!event) || bug_on(!samples) || bug_on(!config))
+ return -err_internal;
+
+ if (config->sample_type & PERF_SAMPLE_TID) {
+ int errcode;
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ payload = NULL;
+
+ errcode = str_to_uint32(token, &samples->pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad pid", errcode);
+ return errcode;
+ }
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ errcode = str_to_uint32(token, &samples->tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad tid", errcode);
+ return errcode;
+ }
+
+ event->sample.pid = &samples->pid;
+ event->sample.tid = &samples->tid;
+ }
+
+ if (config->sample_type & PERF_SAMPLE_TIME) {
+ int errcode;
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "tsc missing", -err_parse);
+ return -err_parse;
+ }
+
+ payload = NULL;
+
+ errcode = str_to_uint64(token, &event->sample.tsc, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad tsc", errcode);
+ return errcode;
+ }
+
+ errcode = pev_time_from_tsc(&samples->time, event->sample.tsc,
+ config);
+ if (errcode < 0) {
+ fprintf(stderr, "error converting tsc %"PRIx64": %s\n",
+ event->sample.tsc,
+ pt_errstr(pt_errcode(errcode)));
+ return -err_pt_lib;
+ }
+
+ event->sample.time = &samples->time;
+ }
+
+ if (config->sample_type & PERF_SAMPLE_ID) {
+ int errcode;
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "id missing", -err_parse);
+ return -err_parse;
+ }
+
+ payload = NULL;
+
+ errcode = str_to_uint64(token, &samples->id, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad id", errcode);
+ return errcode;
+ }
+
+ event->sample.id = &samples->id;
+ }
+
+ if (config->sample_type & PERF_SAMPLE_CPU) {
+ int errcode;
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "cpu missing", -err_parse);
+ return -err_parse;
+ }
+
+ payload = NULL;
+
+ errcode = str_to_uint32(token, &samples->cpu, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad cpu", errcode);
+ return errcode;
+ }
+
+ event->sample.cpu = &samples->cpu;
+ }
+
+ if (config->sample_type & PERF_SAMPLE_STREAM_ID) {
+ int errcode;
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "stream missing", -err_parse);
+ return -err_parse;
+ }
+
+ payload = NULL;
+
+ errcode = str_to_uint64(token, &samples->stream_id, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad stream", errcode);
+ return errcode;
+ }
+
+ event->sample.stream_id = &samples->stream_id;
+ }
+
+ if (config->sample_type & PERF_SAMPLE_IDENTIFIER) {
+ int errcode;
+
+ token = strtok(payload, " ,");
+ if (!token) {
+ yasm_print_err(p->y, "identifier missing", -err_parse);
+ return -err_parse;
+ }
+
+ payload = NULL;
+
+ errcode = str_to_uint64(token, &samples->identifier, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "bad identifier", errcode);
+ return errcode;
+ }
+
+ event->sample.identifier = &samples->identifier;
+ }
+
+ token = strtok(payload, " ,");
+ if (token) {
+ yasm_print_err(p->y, "unexpected samples", -err_parse);
+ return -err_parse;
+ }
+
+ return 0;
+}
+
+static int sb_pevent(struct parser *p, struct pev_event *event, char *payload)
+{
+ const struct pev_config *config;
+ struct pev_sample_buffer samples;
+ struct sb_file *sb;
+ uint8_t raw[FILENAME_MAX];
+ int errcode, size;
+
+ memset(raw, 0, sizeof(raw));
+
+ sb = p_get_current_sbfile(p);
+ if (!sb)
+ return -err_sb_missing;
+
+ errcode = sb_set_format(p, sb, sbf_pevent);
+ if (errcode < 0)
+ return errcode;
+
+ config = &sb->variant.pevent.config;
+
+ errcode = pevent_process_samples(event, &samples, p, config, payload);
+ if (errcode < 0)
+ return errcode;
+
+ size = pev_write(event, raw, raw + sizeof(raw), config);
+ if (size < 0) {
+ fprintf(stderr, "error writing pevent sample: %s\n",
+ pt_errstr(pt_errcode(size)));
+ return -err_pt_lib;
+ }
+
+ /* Emitting a pevent sideband event finalizes the configuration. */
+ sb->variant.pevent.is_final = 1;
+
+ return sb_raw(p, raw, size);
+}
+
+static int pevent_mmap_section(struct parser *p, const char *section,
+ const char *pid, const char *tid)
+{
+ union {
+ struct pev_record_mmap mmap;
+ uint8_t buffer[FILENAME_MAX];
+ } record;
+ struct pev_event event;
+ const char *filename;
+ uint64_t start, org;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ filename = p->y->binfile;
+ if (!filename) {
+ yasm_print_err(p->y, "pevent-mmap-section - filename",
+ -err_internal);
+ return -err_internal;
+ }
+
+ strncpy(record.mmap.filename, filename,
+ sizeof(record.buffer) - sizeof(record.mmap));
+
+ errcode = str_to_uint32(pid, &record.mmap.pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - pid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.mmap.tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - tid", errcode);
+ return errcode;
+ }
+
+ errcode = yasm_lookup_section_label(p->y, section, "vstart",
+ &record.mmap.addr);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - section vstart",
+ errcode);
+ return errcode;
+ }
+
+ errcode = yasm_lookup_section_label(p->y, section, "length",
+ &record.mmap.len);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - section length",
+ errcode);
+ return errcode;
+ }
+
+ errcode = yasm_lookup_section_label(p->y, section, "start",
+ &start);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - section start",
+ errcode);
+ return errcode;
+ }
+
+ errcode = yasm_lookup_label(p->y, &org, "org");
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - org",
+ errcode);
+ return errcode;
+ }
+
+ if (start < org) {
+ yasm_print_err(p->y, "corrupt section labels", -err_internal);
+ return -err_internal;
+ }
+
+ record.mmap.pgoff = start - org;
+
+ event.type = PERF_RECORD_MMAP;
+ event.record.mmap = &record.mmap;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_mmap(struct parser *p, const char *pid, const char *tid,
+ const char *addr, const char *len, const char *pgoff,
+ const char *filename)
+{
+ union {
+ struct pev_record_mmap mmap;
+ uint8_t buffer[FILENAME_MAX];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint32(pid, &record.mmap.pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - pid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.mmap.tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - tid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(addr, &record.mmap.addr, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - addr", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(len, &record.mmap.len, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - len", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(pgoff, &record.mmap.pgoff, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-mmap-section - pgoff", errcode);
+ return errcode;
+ }
+
+ strncpy(record.mmap.filename, filename,
+ sizeof(record.buffer) - sizeof(record.mmap));
+
+ event.type = PERF_RECORD_MMAP;
+ event.record.mmap = &record.mmap;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_lost(struct parser *p, const char *id, const char *lost)
+{
+ union {
+ struct pev_record_lost lost;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint64(id, &record.lost.id, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-lost - id", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(lost, &record.lost.lost, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-lost - lost", errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_LOST;
+ event.record.lost = &record.lost;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_comm(struct parser *p, const char *pid, const char *tid,
+ const char *comm, uint16_t misc)
+{
+ union {
+ struct pev_record_comm comm;
+ uint8_t buffer[FILENAME_MAX];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint32(pid, &record.comm.pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-comm - pid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.comm.tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-comm - tid", errcode);
+ return errcode;
+ }
+
+ strcpy(record.comm.comm, comm);
+
+ event.type = PERF_RECORD_COMM;
+ event.misc = misc;
+ event.record.comm = &record.comm;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_exit(struct parser *p, const char *pid, const char *ppid,
+ const char *tid, const char *ptid, const char *time)
+{
+ union {
+ struct pev_record_exit exit;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint32(pid, &record.exit.pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-exit - pid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(ppid, &record.exit.ppid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-exit - ppid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.exit.tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-exit - tid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(ptid, &record.exit.ptid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-exit - ptid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(time, &record.exit.time, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-exit - time", errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_EXIT;
+ event.record.exit = &record.exit;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_fork(struct parser *p, const char *pid, const char *ppid,
+ const char *tid, const char *ptid, const char *time)
+{
+ union {
+ struct pev_record_fork fork;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint32(pid, &record.fork.pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-fork - pid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(ppid, &record.fork.ppid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-fork - ppid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.fork.tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-fork - tid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(ptid, &record.fork.ptid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-fork - ptid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(time, &record.fork.time, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-fork - time", errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_FORK;
+ event.record.fork = &record.fork;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_aux(struct parser *p, const char *offset, const char *size,
+ const char *flags)
+{
+ union {
+ struct pev_record_aux aux;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint64(offset, &record.aux.aux_offset, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-aux - offset", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(size, &record.aux.aux_size, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-aux - size", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint64(flags, &record.aux.flags, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-aux - flags", errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_AUX;
+ event.record.aux = &record.aux;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_itrace_start(struct parser *p, const char *pid,
+ const char *tid)
+{
+ union {
+ struct pev_record_itrace_start itrace_start;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint32(pid, &record.itrace_start.pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-itrace-start - pid", errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.itrace_start.tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-itrace-start - tid", errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_ITRACE_START;
+ event.record.itrace_start = &record.itrace_start;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_lost_samples(struct parser *p, const char *lost)
+{
+ union {
+ struct pev_record_lost_samples lost_samples;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ errcode = str_to_uint64(lost, &record.lost_samples.lost, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y, "pevent-lost-samples - lost", errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_LOST_SAMPLES;
+ event.record.lost_samples = &record.lost_samples;
+
+ return sb_pevent(p, &event, NULL);
+}
+
+static int pevent_switch(struct parser *p, uint16_t misc, char *payload)
+{
+ struct pev_event event;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(&event, 0, sizeof(event));
+
+ event.type = PERF_RECORD_SWITCH;
+ event.misc = misc;
+
+ return sb_pevent(p, &event, payload);
+}
+
+static int pevent_switch_cpu_wide(struct parser *p, const char *pid,
+ const char *tid, uint16_t misc)
+{
+ union {
+ struct pev_record_switch_cpu_wide switch_cpu_wide;
+ uint8_t buffer[1024];
+ } record;
+ struct pev_event event;
+ int errcode;
+
+ if (bug_on(!p) || bug_on(!p->y))
+ return -err_internal;
+
+ memset(record.buffer, 0, sizeof(record.buffer));
+ memset(&event, 0, sizeof(event));
+
+ event.misc = misc;
+
+ errcode = str_to_uint32(pid, &record.switch_cpu_wide.next_prev_pid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y,
+ "pevent-switch-cpu-wide - next_prev_pid",
+ errcode);
+ return errcode;
+ }
+
+ errcode = str_to_uint32(tid, &record.switch_cpu_wide.next_prev_tid, 0);
+ if (errcode < 0) {
+ yasm_print_err(p->y,
+ "pevent-switch-cpu-wide - next_prev_tid",
+ errcode);
+ return errcode;
+ }
+
+ event.type = PERF_RECORD_SWITCH_CPU_WIDE;
+ event.record.switch_cpu_wide = &record.switch_cpu_wide;
+
+ return sb_pevent(p, &event, NULL);
+}
+#endif /* defined(FEATURE_PEVENT) */
+
+/* Process a @sb directive.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @p is the NULL pointer.
+ * Returns -err_parse if a general parsing error was encountered.
+ * Returns -err_parse_unknown_directive if there was an unknown pt directive.
+ */
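+/* The supported @sb directives are: primary, secondary, raw-8/16/32/64,
+ * and, with FEATURE_PEVENT, the pevent-* directives handled below.
+ */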
+static int p_process_sb(struct parser *p)
+{
+ struct pt_directive *pd;
+ char *directive, *payload;
+
+ if (bug_on(!p))
+ return -err_internal;
+
+ pd = p->pd;
+ if (!pd)
+ return -err_internal;
+
+ directive = pd->name;
+ payload = pd->payload;
+
+ if (strcmp(directive, "primary") == 0) {
+ char *fmt, *src;
+
+ fmt = strtok(payload, " ,");
+ if (!fmt) {
+ yasm_print_err(p->y, "primary - format missing",
+ -err_parse_no_args);
+ return -err_parse_no_args;
+ }
+
+ src = strtok(NULL, " ");
+
+ return sb_open(p, fmt, src, "primary");
+ } else if (strcmp(directive, "secondary") == 0) {
+ char *fmt, *src;
+
+ fmt = strtok(payload, " ,");
+ if (!fmt) {
+ yasm_print_err(p->y, "secondary - format missing",
+ -err_parse_no_args);
+ return -err_parse_no_args;
+ }
+
+ src = strtok(NULL, " ");
+
+ return sb_open(p, fmt, src, "secondary");
+ } else if (strcmp(directive, "raw-8") == 0) {
+ uint8_t value;
+ int errcode;
+
+ errcode = parse_uint8(&value, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, payload, errcode);
+ return errcode;
+ }
+
+ return sb_raw_8(p, value);
+ } else if (strcmp(directive, "raw-16") == 0) {
+ uint16_t value;
+ int errcode;
+
+ errcode = parse_uint16(&value, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, payload, errcode);
+ return errcode;
+ }
+
+ return sb_raw_16(p, value);
+ } else if (strcmp(directive, "raw-32") == 0) {
+ uint32_t value;
+ int errcode;
+
+ errcode = parse_uint32(&value, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, payload, errcode);
+ return errcode;
+ }
+
+ return sb_raw_32(p, value);
+ } else if (strcmp(directive, "raw-64") == 0) {
+ uint64_t value;
+ int errcode;
+
+ errcode = parse_uint64(&value, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, payload, errcode);
+ return errcode;
+ }
+
+ return sb_raw_64(p, value);
+#if defined(FEATURE_PEVENT)
+ } else if (strcmp(directive, "pevent-sample_type") == 0) {
+ uint64_t sample_type;
+ char *token;
+
+ sample_type = 0ull;
+
+ for (token = strtok(payload, " ,"); token;
+ token = strtok(NULL, " ,")) {
+
+ if (strcmp(token, "tid") == 0)
+ sample_type |= (uint64_t) PERF_SAMPLE_TID;
+ else if (strcmp(token, "time") == 0)
+ sample_type |= (uint64_t) PERF_SAMPLE_TIME;
+ else if (strcmp(token, "id") == 0)
+ sample_type |= (uint64_t) PERF_SAMPLE_ID;
+ else if (strcmp(token, "stream") == 0)
+ sample_type |= (uint64_t) PERF_SAMPLE_STREAM_ID;
+ else if (strcmp(token, "cpu") == 0)
+ sample_type |= (uint64_t) PERF_SAMPLE_CPU;
+ else if (strcmp(token, "identifier") == 0)
+ sample_type |=
+ (uint64_t) PERF_SAMPLE_IDENTIFIER;
+ else {
+ uint64_t value;
+ int errcode;
+
+ errcode = parse_uint64(&value, payload);
+ if (errcode < 0) {
+ yasm_print_err(p->y, token, errcode);
+ return errcode;
+ }
+
+ sample_type |= value;
+ }
+ }
+
+ return pevent_sample_type(p, sample_type);
+
+ } else if (strcmp(directive, "pevent-mmap-section") == 0) {
+ char *section, *pid, *tid;
+
+ section = strtok(payload, " ,");
+ if (!section) {
+ yasm_print_err(p->y, "section missing", -err_parse);
+ return -err_parse;
+ }
+
+ pid = strtok(NULL, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_mmap_section(p, section, pid, tid);
+ } else if (strcmp(directive, "pevent-mmap") == 0) {
+ char *pid, *tid, *addr, *len, *pgoff, *filename;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ addr = strtok(NULL, " ,");
+ if (!addr) {
+ yasm_print_err(p->y, "addr missing", -err_parse);
+ return -err_parse;
+ }
+
+ len = strtok(NULL, " ,");
+ if (!len) {
+ yasm_print_err(p->y, "len missing", -err_parse);
+ return -err_parse;
+ }
+
+ pgoff = strtok(NULL, " ,");
+ if (!pgoff) {
+ yasm_print_err(p->y, "pgoff missing", -err_parse);
+ return -err_parse;
+ }
+
+ filename = strtok(NULL, " ,");
+ if (!filename) {
+ yasm_print_err(p->y, "filename missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_mmap(p, pid, tid, addr, len, pgoff, filename);
+ } else if (strcmp(directive, "pevent-lost") == 0) {
+ char *id, *lost;
+
+ id = strtok(payload, " ,");
+ if (!id) {
+ yasm_print_err(p->y, "id missing", -err_parse);
+ return -err_parse;
+ }
+
+ lost = strtok(NULL, " ,");
+ if (!lost) {
+ yasm_print_err(p->y, "lost missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_lost(p, id, lost);
+ } else if (strcmp(directive, "pevent-comm") == 0) {
+ char *pid, *tid, *comm;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ comm = strtok(NULL, " ,");
+ if (!comm) {
+ yasm_print_err(p->y, "comm missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_comm(p, pid, tid, comm, 0);
+ } else if (strcmp(directive, "pevent-comm.exec") == 0) {
+ char *pid, *tid, *comm;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ comm = strtok(NULL, " ,");
+ if (!comm) {
+ yasm_print_err(p->y, "comm missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_comm(p, pid, tid, comm,
+ PERF_RECORD_MISC_COMM_EXEC);
+ } else if (strcmp(directive, "pevent-exit") == 0) {
+ char *pid, *ppid, *tid, *ptid, *time;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ ppid = strtok(NULL, " ,");
+ if (!ppid) {
+ yasm_print_err(p->y, "ppid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ ptid = strtok(NULL, " ,");
+ if (!ptid) {
+ yasm_print_err(p->y, "ptid missing", -err_parse);
+ return -err_parse;
+ }
+
+ time = strtok(NULL, " ,");
+ if (!time) {
+ yasm_print_err(p->y, "time missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_exit(p, pid, ppid, tid, ptid, time);
+ } else if (strcmp(directive, "pevent-fork") == 0) {
+ char *pid, *ppid, *tid, *ptid, *time;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ ppid = strtok(NULL, " ,");
+ if (!ppid) {
+ yasm_print_err(p->y, "ppid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ ptid = strtok(NULL, " ,");
+ if (!ptid) {
+ yasm_print_err(p->y, "ptid missing", -err_parse);
+ return -err_parse;
+ }
+
+ time = strtok(NULL, " ,");
+ if (!time) {
+ yasm_print_err(p->y, "time missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_fork(p, pid, ppid, tid, ptid, time);
+ } else if (strcmp(directive, "pevent-aux") == 0) {
+ char *offset, *size, *flags;
+
+ offset = strtok(payload, " ,");
+ if (!offset) {
+ yasm_print_err(p->y, "offset missing", -err_parse);
+ return -err_parse;
+ }
+
+ size = strtok(NULL, " ,");
+ if (!size) {
+ yasm_print_err(p->y, "size missing", -err_parse);
+ return -err_parse;
+ }
+
+ flags = strtok(NULL, " ,");
+ if (!flags) {
+ yasm_print_err(p->y, "flags missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_aux(p, offset, size, flags);
+ } else if (strcmp(directive, "pevent-itrace-start") == 0) {
+ char *pid, *tid;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_itrace_start(p, pid, tid);
+ } else if (strcmp(directive, "pevent-lost-samples") == 0) {
+ char *lost;
+
+ lost = strtok(payload, " ,");
+ if (!lost) {
+ yasm_print_err(p->y, "lost missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_lost_samples(p, lost);
+ } else if (strcmp(directive, "pevent-switch.in") == 0)
+ return pevent_switch(p, 0u, payload);
+ else if (strcmp(directive, "pevent-switch.out") == 0)
+ return pevent_switch(p, PERF_RECORD_MISC_SWITCH_OUT, payload);
+ else if (strcmp(directive, "pevent-switch-cpu-wide.in") == 0) {
+ char *pid, *tid;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_switch_cpu_wide(p, pid, tid, 0u);
+ } else if (strcmp(directive, "pevent-switch-cpu-wide.out") == 0) {
+ char *pid, *tid;
+
+ pid = strtok(payload, " ,");
+ if (!pid) {
+ yasm_print_err(p->y, "pid missing", -err_parse);
+ return -err_parse;
+ }
+
+ tid = strtok(NULL, " ,");
+ if (!tid) {
+ yasm_print_err(p->y, "tid missing", -err_parse);
+ return -err_parse;
+ }
+
+ return pevent_switch_cpu_wide(p, pid, tid,
+ PERF_RECORD_MISC_SWITCH_OUT);
+#endif /* defined(FEATURE_PEVENT) */
+ } else {
+ yasm_print_err(p->y, "syntax error",
+ -err_parse_unknown_directive);
+ return -err_parse_unknown_directive;
+ }
+}
+
+#endif /* defined(FEATURE_SIDEBAND) */
+
+/* Processes the current directive.
+ * If the encoder returns an error, a message including current file and
+ * line number together with the pt error string is printed on stderr.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_internal if @p or @e is the NULL pointer.
+ * Returns -err_parse_missing_directive if there was a pt directive marker,
+ * but no directive.
+ * Returns -stop_process if the .exp directive was encountered.
+ * Returns -err_pt_lib if the pt encoder returned an error.
+ * Returns -err_parse if a general parsing error was encountered.
+ * Returns -err_parse_unknown_directive if there was an unknown pt directive.
+ */
+static int p_process(struct parser *p, struct pt_encoder *e)
+{
+ char *directive, *tmp;
+ struct pt_directive *pd;
+
+ if (bug_on(!p))
+ return -err_internal;
+
+ pd = p->pd;
+ if (!pd)
+ return -err_internal;
+
+ directive = pd->name;
+
+ /* We must have a directive. */
+ if (!directive || (strcmp(directive, "") == 0))
+ return yasm_print_err(p->y, "invalid syntax",
+ -err_parse_missing_directive);
+
+ /* Check for special directives - they won't contain labels. */
+ if (strcmp(directive, ".exp") == 0) {
+ int errcode;
+
+		/* this is the end of processing pt directives, so we
+		 * add an end-of-stream ("eos") label to the pt
+		 * directive labels.
+		 */
+ errcode = l_append(p->pt_labels, "eos", p->pt_bytes_written);
+ if (errcode < 0)
+ return yasm_print_err(p->y, "append label", errcode);
+
+ return -stop_process;
+ }
+
+ /* find a label name. */
+ tmp = strchr(directive, ':');
+ if (tmp) {
+ char *pt_label_name;
+ uint64_t x;
+ int errcode, bytes_written;
+ size_t len;
+
+ pt_label_name = directive;
+ directive = tmp+1;
+ *tmp = '\0';
+
+ /* ignore whitespace between label and directive. */
+ while (isspace(*directive))
+ directive += 1;
+
+ /* we must have a directive, not just a label. */
+ if (strcmp(directive, "") == 0)
+ return yasm_print_err(p->y, "invalid syntax",
+ -err_parse_missing_directive);
+
+		/* if we can look up a yasm label with the same name, the
+		 * current pt directive label is invalid. */
+ errcode = yasm_lookup_label(p->y, &x, pt_label_name);
+ if (errcode == 0)
+ errcode = -err_label_not_unique;
+
+ if (errcode != -err_no_label)
+ return yasm_print_err(p->y, "label lookup",
+ errcode);
+
+		/* if we can look up a pt directive label with the same
+		 * name, the current pt directive label is invalid. */
+ errcode = l_lookup(p->pt_labels, &x, pt_label_name);
+ if (errcode == 0)
+ errcode = -err_label_not_unique;
+
+		if (errcode != -err_no_label)
+			return yasm_print_err(p->y, "label lookup",
+					      errcode);
+
+ bytes_written = -pte_internal;
+ switch (pd->kind) {
+ case pdk_pt:
+ bytes_written = p->pt_bytes_written;
+ break;
+
+#if defined(FEATURE_SIDEBAND)
+ case pdk_sb: {
+ struct sb_file *sb;
+
+ sb = p_get_current_sbfile(p);
+ if (!sb)
+ return yasm_print_err(p->y, "sideband label",
+ -err_sb_missing);
+
+ bytes_written = sb->bytes_written;
+ }
+ break;
+#endif /* defined(FEATURE_SIDEBAND) */
+ }
+
+ if (bytes_written < 0)
+ return bytes_written;
+
+ errcode = l_append(p->pt_labels, pt_label_name, bytes_written);
+ if (errcode < 0)
+ return errcode;
+
+ /* Update the directive name in the parser. */
+ len = strlen(directive) + 1;
+ memmove(pd->name, directive, len);
+ }
+
+ switch (pd->kind) {
+ case pdk_pt:
+ return p_process_pt(p, e);
+
+#if defined(FEATURE_SIDEBAND)
+ case pdk_sb:
+ return p_process_sb(p);
+#endif
+ }
+
+ return -err_internal;
+}
+
+/* Starts the parsing process.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_pt_lib if the pt encoder could not be initialized.
+ * Returns -err_file_write if the .pt or .exp file could not be fully
+ * written.
+ */
+static int p_start(struct parser *p)
+{
+ int errcode;
+
+ if (bug_on(!p))
+ return -err_internal;
+
+ errcode = yasm_parse(p->y);
+ if (errcode < 0)
+ return errcode;
+
+ for (;;) {
+ int bytes_written;
+ struct pt_encoder *e;
+
+ errcode = yasm_next_pt_directive(p->y, p->pd);
+ if (errcode < 0)
+ break;
+
+ e = pt_alloc_encoder(p->conf);
+ if (!e) {
+ fprintf(stderr, "pt_alloc_encoder failed\n");
+ errcode = -err_pt_lib;
+ break;
+ }
+
+ bytes_written = p_process(p, e);
+
+ pt_free_encoder(e);
+
+ if (bytes_written == -stop_process) {
+ errcode = p_gen_expfile(p);
+ break;
+ }
+ if (bytes_written < 0) {
+ errcode = bytes_written;
+ break;
+ }
+ if (fwrite(p->conf->begin, 1, bytes_written, p->ptfile)
+ != (size_t)bytes_written) {
+ fprintf(stderr, "write %s failed", p->ptfilename);
+ errcode = -err_file_write;
+ break;
+ }
+ }
+
+ /* If there is no directive left, there's nothing more to do. */
+ if (errcode == -err_no_directive)
+ return 0;
+
+ return errcode;
+}
+
+int parse(const char *pttfile, const struct pt_config *conf)
+{
+ int errcode;
+ struct parser *p;
+
+ p = p_alloc(pttfile, conf);
+ if (!p)
+ return -err_no_mem;
+
+ errcode = p_open_files(p);
+ if (errcode < 0)
+ goto error;
+
+ errcode = p_start(p);
+ p_close_files(p);
+
+error:
+ p_free(p);
+ return errcode;
+}
+
+int parse_empty(char *payload)
+{
+ if (!payload)
+ return 0;
+
+ payload = strtok(payload, " ");
+ if (!payload || *payload == '\0')
+ return 0;
+
+ return -err_parse_trailing_tokens;
+}
+
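+/* Parses a tnt payload, e.g. "tnnt", into @tnt and its bit count into @size.
+ *
+ * Whitespace and '.' characters in @payload are ignored; 't' encodes a taken
+ * and 'n' a not-taken conditional branch, most-significant bit first, so
+ * "tnnt" yields *@size == 4 and *@tnt == 0x9.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_parse_unknown_char if @payload contains any other character.
+ */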
+int parse_tnt(uint64_t *tnt, uint8_t *size, char *payload)
+{
+ char c;
+
+ if (bug_on(!size))
+ return -err_internal;
+
+ if (bug_on(!tnt))
+ return -err_internal;
+
+ *size = 0;
+ *tnt = 0ull;
+
+ if (!payload)
+ return 0;
+
+ while (*payload != '\0') {
+ c = *payload;
+ payload++;
+ if (isspace(c) || c == '.')
+ continue;
+ *size += 1;
+ *tnt <<= 1;
+ switch (c) {
+ case 'n':
+ break;
+ case 't':
+ *tnt |= 1;
+ break;
+ default:
+ return -err_parse_unknown_char;
+ }
+ }
+
+ return 0;
+}
+
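+/* Converts @val into the corresponding enum pt_ip_compression in @ipc.
+ *
+ * Returns 0 on success; -err_parse_ipc if @val does not name a valid IP
+ * compression.
+ */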
+static int ipc_from_uint32(enum pt_ip_compression *ipc, uint32_t val)
+{
+ switch (val) {
+ case pt_ipc_suppressed:
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ *ipc = (enum pt_ip_compression) val;
+ return 0;
+ }
+ return -err_parse_ipc;
+}
+
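+/* Parses an IP payload of the form "<ipc>: <ip>", e.g. "3: 0x1000", into
+ * @ipc and @ip.
+ *
+ * The <ip> part is either a numeric address or a %-prefixed label that is
+ * resolved via the yasm labels in @p.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_parse_trailing_tokens if there are tokens left after <ip>.
+ */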
+int parse_ip(struct parser *p, uint64_t *ip, enum pt_ip_compression *ipc,
+ char *payload)
+{
+ uint32_t ipcval;
+ int errcode;
+
+ if (bug_on(!ip))
+ return -err_internal;
+
+ if (bug_on(!ipc))
+ return -err_internal;
+
+ *ipc = pt_ipc_suppressed;
+ *ip = 0;
+
+ payload = strtok(payload, " :");
+ if (!payload || *payload == '\0')
+ return -err_parse_no_args;
+
+ errcode = str_to_uint32(payload, &ipcval, 0);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = ipc_from_uint32(ipc, ipcval);
+ if (errcode < 0)
+ return errcode;
+
+ payload = strtok(NULL, " :");
+ if (!payload)
+ return -err_parse_ip_missing;
+
+ /* can the argument be resolved to a label? */
+ if (*payload == '%') {
+ if (!p)
+ return -err_internal;
+
+ errcode = yasm_lookup_label(p->y, ip, payload + 1);
+ if (errcode < 0)
+ return errcode;
+ } else {
+ /* otherwise, try to parse it as an address. */
+ errcode = str_to_uint64(payload, ip, 0);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ /* no more tokens left. */
+ payload = strtok(NULL, " ");
+ if (payload)
+ return -err_parse_trailing_tokens;
+
+ return 0;
+}
+
+int parse_uint64(uint64_t *x, char *payload)
+{
+ int errcode;
+
+ if (bug_on(!x))
+ return -err_internal;
+
+ payload = strtok(payload, " ,");
+ if (!payload)
+ return -err_parse_no_args;
+
+ errcode = str_to_uint64(payload, x, 0);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
+int parse_uint32(uint32_t *x, char *payload)
+{
+ int errcode;
+
+ if (bug_on(!x))
+ return -err_internal;
+
+ payload = strtok(payload, " ,");
+ if (!payload)
+ return -err_parse_no_args;
+
+ errcode = str_to_uint32(payload, x, 0);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
+int parse_uint16(uint16_t *x, char *payload)
+{
+ int errcode;
+
+ if (bug_on(!x))
+ return -err_internal;
+
+ payload = strtok(payload, " ,");
+ if (!payload)
+ return -err_parse_no_args;
+
+ errcode = str_to_uint16(payload, x, 0);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
+int parse_uint8(uint8_t *x, char *payload)
+{
+ int errcode;
+
+ if (bug_on(!x))
+ return -err_internal;
+
+ payload = strtok(payload, " ,");
+ if (!payload)
+ return -err_parse_no_args;
+
+ errcode = str_to_uint8(payload, x, 0);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
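+/* Parses a TMA payload of two comma-separated values, e.g. "0x257, 0x1cd",
+ * into @ctc and @fc.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_parse_int_too_big if a value does not fit into 16 bits.
+ */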
+int parse_tma(uint16_t *ctc, uint16_t *fc, char *payload)
+{
+ char *endptr;
+ long int i;
+
+ if (bug_on(!ctc || !fc))
+ return -err_internal;
+
+ payload = strtok(payload, ",");
+ if (!payload || *payload == '\0')
+ return -err_parse_no_args;
+
+ i = strtol(payload, &endptr, 0);
+ if (payload == endptr || *endptr != '\0')
+ return -err_parse_int;
+
+ if (i > 0xffffl)
+ return -err_parse_int_too_big;
+
+ *ctc = (uint16_t)i;
+
+ payload = strtok(NULL, " ,");
+ if (!payload)
+ return -err_parse_no_args;
+
+ i = strtol(payload, &endptr, 0);
+ if (payload == endptr || *endptr != '\0')
+ return -err_parse_int;
+
+ if (i > 0xffffl)
+ return -err_parse_int_too_big;
+
+ *fc = (uint16_t)i;
+
+ /* no more tokens left. */
+ payload = strtok(NULL, " ");
+ if (payload)
+ return -err_parse_trailing_tokens;
+
+ return 0;
+}
diff --git a/pttc/src/posix/util.c b/pttc/src/posix/util.c
new file mode 100644
index 000000000000..02ad207c1113
--- /dev/null
+++ b/pttc/src/posix/util.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "util.h"
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
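+/* Executes @file with the argument vector @argv and waits for it to finish.
+ *
+ * Returns 0 if the command exited with status zero.
+ * Returns -err_run if the command could not be executed or exited with a
+ * non-zero status.
+ * Returns -err_other if waiting for the command failed or if it did not
+ * exit normally.
+ */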
+int run(const char *file, char *const argv[])
+{
+ pid_t pid;
+ int status;
+
+ if (bug_on(!file))
+ return -err_internal;
+
+ if (bug_on(!argv))
+ return -err_internal;
+
+ pid = fork();
+
+ if (!pid) {
+ execvp(file, argv);
+ perror(argv[0]);
+ exit(1);
+ }
+ if (waitpid(pid, &status, 0) < 0)
+ return -err_other;
+
+ if (!WIFEXITED(status))
+ return -err_other;
+
+ if (WEXITSTATUS(status))
+ return -err_run;
+
+ return 0;
+}
diff --git a/pttc/src/pttc.c b/pttc/src/pttc.c
new file mode 100644
index 000000000000..4250c595e0b2
--- /dev/null
+++ b/pttc/src/pttc.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "pttc.h"
+#include "parse.h"
+
+int pttc_main(const struct pttc_options *options)
+{
+ int errcode;
+ enum { buflen = 1024 };
+ uint8_t buf[buflen];
+ struct pt_config conf;
+
+ pt_config_init(&conf);
+ conf.cpu = options->cpu;
+ conf.begin = buf;
+ conf.end = buf+buflen;
+
+ /* apply errata for the chosen cpu. */
+ if (conf.cpu.vendor) {
+ errcode = pt_cpu_errata(&conf.errata, &conf.cpu);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "fatal: errata configuration failed %d: %s\n",
+ errcode, pt_errstr(pt_errcode(errcode)));
+ return errcode;
+ }
+ }
+
+ errcode = parse(options->pttfile, &conf);
+ if (errcode < 0 && errcode != -err_run)
+ fprintf(stderr, "fatal: %s\n", errstr[-errcode]);
+
+ return -errcode;
+}
diff --git a/pttc/src/util.c b/pttc/src/util.c
new file mode 100644
index 000000000000..517280e403d2
--- /dev/null
+++ b/pttc/src/util.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "util.h"
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
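+/* Duplicates the zero-terminated string @s into a newly allocated buffer.
+ *
+ * The caller is responsible for freeing the returned string.
+ *
+ * Returns the copy on success; NULL if @s is NULL or if the allocation
+ * failed.
+ */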
+char *duplicate_str(const char *s)
+{
+ char *dup;
+
+ if (!s)
+ return NULL;
+
+ dup = malloc(strlen(s)+1);
+ if (!dup)
+ return NULL;
+ return strcpy(dup, s);
+}
+
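+/* Parses @str as an unsigned 64-bit integer in @base into @val.
+ *
+ * A @base of zero lets strtoull() detect the base from the usual prefixes.
+ * The entire string must be consumed by the conversion.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_parse_int if @str is not a valid integer.
+ * Returns -err_parse_int_too_big if the value does not fit into 64 bits.
+ */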
+int str_to_uint64(const char *str, uint64_t *val, int base)
+{
+ char *endptr;
+ uint64_t x;
+
+ if (!str || !val)
+ return -err_internal;
+
+ errno = 0;
+ x = strtoull(str, &endptr, base);
+
+ if (errno == EINVAL)
+ return -err_parse_int;
+
+ if (errno == ERANGE)
+ return -err_parse_int_too_big;
+
+ if (str == endptr || *endptr != '\0')
+ return -err_parse_int;
+
+ *val = x;
+
+ return 0;
+}
+
+int str_to_uint32(const char *str, uint32_t *val, int base)
+{
+ uint64_t x;
+ int errcode;
+
+ if (!str || !val)
+ return -err_internal;
+
+ errcode = str_to_uint64(str, &x, base);
+ if (errcode < 0)
+ return errcode;
+
+ if (UINT32_MAX < x)
+ return -err_parse_int_too_big;
+
+ *val = (uint32_t) x;
+ return 0;
+}
+
+int str_to_uint16(const char *str, uint16_t *val, int base)
+{
+ uint64_t x;
+ int errcode;
+
+ if (!str || !val)
+ return -err_internal;
+
+ errcode = str_to_uint64(str, &x, base);
+ if (errcode < 0)
+ return errcode;
+
+ if (UINT16_MAX < x)
+ return -err_parse_int_too_big;
+
+ *val = (uint16_t) x;
+ return 0;
+}
+
+int str_to_uint8(const char *str, uint8_t *val, int base)
+{
+ uint64_t x;
+ int errcode;
+
+ if (!str || !val)
+ return -err_internal;
+
+ errcode = str_to_uint64(str, &x, base);
+ if (errcode < 0)
+ return errcode;
+
+ if (UINT8_MAX < x)
+ return -err_parse_int_too_big;
+
+ *val = (uint8_t) x;
+ return 0;
+}
+
+int do_bug_on(int cond, const char *condstr, const char *file, int line)
+{
+ if (cond)
+ fprintf(stderr, "%s:%d: internal error: %s\n", file, line,
+ condstr);
+ return cond;
+}
+
+struct label *l_alloc(void)
+{
+ return calloc(1, sizeof(struct label));
+}
+
+void l_free(struct label *l)
+{
+ if (!l)
+ return;
+
+ l_free(l->next);
+ free(l->name);
+ free(l);
+}
+
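+/* Appends a new label @name with address @addr to the label list @l.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_label_not_unique if a label named @name already exists.
+ * Returns -err_no_mem if the new label could not be allocated.
+ */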
+int l_append(struct label *l, const char *name, uint64_t addr)
+{
+ int errcode;
+
+ if (bug_on(!l))
+ return -err_internal;
+
+ if (bug_on(!name))
+ return -err_internal;
+
+ /* skip to the last label. */
+ while (l->next) {
+ l = l->next;
+
+ /* ignore the first label, which has no name. */
+ if (strcmp(l->name, name) == 0)
+ return -err_label_not_unique;
+ }
+
+ /* append a new label. */
+ l->next = l_alloc();
+ if (!l->next)
+ return -err_no_mem;
+
+ /* save the name. */
+ l->next->name = duplicate_str(name);
+ if (!l->next->name) {
+ errcode = -err_no_mem;
+ goto error;
+ }
+
+ /* save the address. */
+ l->next->addr = addr;
+
+ return 0;
+error:
+ free(l->next->name);
+ free(l->next);
+ l->next = NULL;
+ return errcode;
+}
+
+int l_lookup(const struct label *l, uint64_t *addr,
+ const char *name)
+{
+ if (bug_on(!l))
+ return -err_internal;
+
+ if (bug_on(!addr))
+ return -err_internal;
+
+ if (bug_on(!name))
+ return -err_internal;
+
+
+ *addr = 0;
+ while (l->next) {
+ l = l->next;
+ if (strcmp(l->name, name) == 0) {
+ *addr = l->addr;
+ return 0;
+ }
+ }
+ return -err_no_label;
+}
+
+struct label *l_find(struct label *l, const char *name)
+{
+ if (bug_on(!l))
+ return NULL;
+
+ if (bug_on(!name))
+ return NULL;
+
+
+ while (l->next) {
+ l = l->next;
+
+ if (bug_on(!l->name))
+ continue;
+
+ if (strcmp(l->name, name) == 0)
+ return l;
+ }
+ return NULL;
+}
diff --git a/pttc/src/windows/util.c b/pttc/src/windows/util.c
new file mode 100644
index 000000000000..f24fbf12834b
--- /dev/null
+++ b/pttc/src/windows/util.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "util.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <string.h>
+#include <windows.h>
+
+
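+/* Executes @file with the argument vector @argv and waits for it to finish.
+ *
+ * Returns 0 if the command exited with status zero.
+ * Returns -err_run if the command exited with a non-zero status.
+ * Returns -err_other if the command could not be created or waited for.
+ */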
+int run(const char *file, char *const argv[])
+{
+ int errcode;
+
+ int i;
+ size_t size;
+ char *args;
+
+ STARTUPINFO si;
+ PROCESS_INFORMATION pi;
+ DWORD exit_code;
+
+ DWORD dwret;
+ BOOL bret;
+
+
+ errcode = 0;
+
+ if (bug_on(!file)) {
+ errcode = -err_internal;
+ goto out;
+ }
+
+ if (bug_on(!argv)) {
+ errcode = -err_internal;
+ goto out;
+ }
+
+
+ /* calculate the length of the command line - this is the cumulative
+ * length of all arguments, plus two quotation marks per argument (to
+ * make them quoted strings and allow for spaces in file/path names),
+ * plus a space after each argument as delimiter (after the last
+ * argument it's a terminating zero-byte instead of the space).
+ */
+ size = 0;
+ for (i = 0; argv[i]; ++i)
+ size += strlen(argv[i]) + 3;
+
+ /* allocate command line string */
+ args = calloc(size, 1);
+ if (!args)
+ return -err_no_mem;
+
+ /* construct command line string, putting quotation marks
+ * around every argument of the vector and a space after it
+ */
+ size = 0;
+ for (i = 0; argv[i]; ++i) {
+ args[size++] = '"';
+ strcpy(args + size, argv[i]);
+ size += strlen(argv[i]);
+ args[size++] = '"';
+ args[size++] = ' ';
+ }
+ /* transform last space into a terminating zero-byte and fix up size */
+ args[--size] = '\0';
+
+
+ /* initialize process/startup info */
+ memset(&pi, 0, sizeof(pi));
+ memset(&si, 0, sizeof(si));
+ si.cb = sizeof(si);
+
+ /* create process - since the first parameter is NULL, the
+ * second parameter represents a command as it would behave
+ * on a command shell
+ */
+ bret = CreateProcess(NULL, args,
+ NULL, NULL, FALSE, 0, NULL, NULL, &si, &pi);
+ if (!bret) {
+ errcode = -err_other;
+ goto out_args;
+ }
+
+ dwret = WaitForSingleObject(pi.hProcess, INFINITE);
+ if (dwret == WAIT_FAILED) {
+ errcode = -err_other;
+ goto out_handles;
+ }
+
+ bret = GetExitCodeProcess(pi.hProcess, &exit_code);
+ if (!bret) {
+ errcode = -err_other;
+ goto out_handles;
+ }
+
+ if (exit_code != 0)
+ errcode = -err_run;
+
+
+out_handles:
+ CloseHandle(pi.hProcess);
+ CloseHandle(pi.hThread);
+out_args:
+ free(args);
+out:
+ return errcode;
+}
diff --git a/pttc/src/yasm.c b/pttc/src/yasm.c
new file mode 100644
index 000000000000..284f225d1839
--- /dev/null
+++ b/pttc/src/yasm.c
@@ -0,0 +1,848 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "errcode.h"
+#include "file.h"
+#include "util.h"
+#include "yasm.h"
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+# define snprintf _snprintf_c
+#endif
+
+
+static int create_section_label_name(char *label, int size, const char *name,
+ const char *attribute)
+{
+ int written;
+
+ written = snprintf(label, size, "section_%s_%s", name, attribute);
+ if (size <= written)
+ return -err_no_mem;
+
+ return 0;
+}
+
+static int add_section_label(struct label *l, const char *name,
+ const char *attribute, uint64_t value,
+ struct label **length)
+{
+ char label[255];
+ int errcode;
+
+ errcode = create_section_label_name(label, sizeof(label), name,
+ attribute);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = l_append(l, label, value);
+ if (errcode < 0)
+ return errcode;
+
+ if (length)
+ *length = l_find(l, label);
+
+ return 0;
+}
+
+static int parse_section_label(struct label *l, const char *name,
+ const char *attribute)
+{
+ uint64_t addr;
+ char *value;
+
+ value = strtok(NULL, " ]");
+ if (!value)
+ return -err_section_attribute_no_value;
+
+ if (sscanf(value, "%" PRIx64, &addr) != 1)
+ return -err_parse_int;
+
+ return add_section_label(l, name, attribute, addr, NULL);
+}
+
+static int parse_section(char *line, struct label *l, struct label **length)
+{
+ char *name, *attribute;
+ int errcode;
+
+ name = strtok(line, " ");
+ if (!name)
+ return -err_section_no_name;
+
+ /* we initialize the section's length to zero - it will be updated
+ * when we process the section's content.
+ */
+ errcode = add_section_label(l, name, "length", 0ull, length);
+ if (errcode < 0)
+ return errcode;
+
+ for (;;) {
+ attribute = strtok(NULL, " =]");
+ if (!attribute)
+ return 0;
+
+ if (strcmp(attribute, "start") == 0) {
+ errcode = parse_section_label(l, name, "start");
+ if (errcode < 0)
+ return errcode;
+ } else if (strcmp(attribute, "vstart") == 0) {
+ errcode = parse_section_label(l, name, "vstart");
+ if (errcode < 0)
+ return errcode;
+ } else
+ return -err_section_unknown_attribute;
+ }
+}
+
+static int lookup_section_label(struct label *l, const char *name,
+ const char *attribute, uint64_t *value)
+{
+ char label[255];
+ int errcode;
+
+ errcode = create_section_label_name(label, sizeof(label), name,
+ attribute);
+ if (errcode < 0)
+ return errcode;
+
+ return l_lookup(l, value, label);
+}
+
+static int lookup_section_vstart(struct label *l, char *line,
+ uint64_t *vstart)
+{
+ char *name;
+
+ name = strtok(line, " ");
+ if (!name)
+ return -err_section_no_name;
+
+ return lookup_section_label(l, name, "vstart", vstart);
+}
+
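+/* Extracts labels from the yasm listing in @t and appends them to @l.
+ *
+ * This records section start/vstart/length labels, the org base address,
+ * and all source labels together with their absolute addresses.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ * Returns -err_no_org_directive if the listing contains no org directive.
+ */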
+int parse_yasm_labels(struct label *l, const struct text *t)
+{
+ int errcode, no_org_directive;
+ size_t i;
+ uint64_t base_addr;
+ enum { linelen = 1024 };
+ char line[linelen];
+ struct label *length;
+
+ if (bug_on(!t))
+ return -err_internal;
+
+ base_addr = 0;
+ no_org_directive = 1;
+ length = NULL;
+
+ /* determine base address from org directive and insert special
+ * section labels.
+ */
+ for (i = 0; i < t->n; i++) {
+ char *tmp;
+
+ errcode = text_line(t, line, linelen, i);
+ if (errcode < 0)
+ return errcode;
+
+ tmp = strstr(line, "[section");
+ if (tmp) {
+ tmp += strlen("[section");
+ errcode = parse_section(tmp, l, &length);
+ if (errcode < 0)
+ return errcode;
+ continue;
+ }
+
+ tmp = strstr(line, "[org");
+ if (tmp) {
+ base_addr = strtol(tmp+strlen("[org"), NULL, 0);
+
+ errcode = l_append(l, "org", base_addr);
+ if (errcode < 0)
+ return errcode;
+
+ no_org_directive = 0;
+ continue;
+ }
+
+ /* update the section_<name>_length label, if we have one.
+ *
+ * this must be last; it destroys @line.
+ */
+ if (length) {
+ uint64_t value, size;
+
+ tmp = strtok(line, " ");
+ if (!tmp)
+ continue;
+
+ /* we expect a line number. */
+ errcode = str_to_uint64(tmp, &value, 10);
+ if (errcode < 0)
+ continue;
+
+ tmp = strtok(NULL, " ");
+ if (!tmp)
+ continue;
+
+ /* we expect an address. */
+ errcode = str_to_uint64(tmp, &value, 16);
+ if (errcode < 0)
+ continue;
+
+ tmp = strtok(NULL, " ");
+ if (!tmp)
+ continue;
+
+ /* we expect an opcode. */
+ errcode = str_to_uint64(tmp, &value, 16);
+ if (errcode < 0)
+ continue;
+
+ /* we got an opcode - let's compute its size. */
+ for (size = 0; value != 0; value >>= 8)
+ size += 1;
+
+ /* update the section_<name>_length label. */
+ length->addr += size;
+ }
+ }
+
+ if (no_org_directive)
+ return -err_no_org_directive;
+
+ for (i = 0; i < t->n; i++) {
+ char *tmp, *name;
+ uint64_t addr;
+
+ errcode = text_line(t, line, linelen, i);
+ if (errcode < 0)
+ goto error;
+
+ /* Change the base on section switches. */
+ tmp = strstr(line, "[section");
+ if (tmp) {
+ tmp += strlen("[section");
+ errcode = lookup_section_vstart(l, tmp, &base_addr);
+ if (errcode < 0)
+ return errcode;
+ continue;
+ }
+
+ /* skip line number count. */
+ tmp = strtok(line, " ");
+ if (!tmp)
+ continue;
+
+ /* the label can now be on the same line as the memory
+ * address or on a line of its own.
+ * we look at the next token and (1) if it looks like a
+ * label, we search in the following lines for the
+ * corresponding address; or (2) if it looks like an
+ * address, we store it and see if the token after the
+ * opcode looks like a label; or (3) none of the above,
+ * we continue with the next line.
+ */
+
+ /* second token after the line number count. it's
+ * either an address; or a label.
+ */
+ tmp = strtok(NULL, " ");
+ if (!tmp)
+ continue;
+
+ if (!make_label(tmp)) {
+ /* get address in case we find a label later. */
+ if (sscanf(tmp, "%" PRIx64, &addr) != 1)
+ continue;
+
+ /* skip the opcode token. */
+ tmp = strtok(NULL, " ");
+ if (!tmp)
+ continue;
+
+ /* this might be a label now. */
+ tmp = strtok(NULL, " ");
+ if (!make_label(tmp))
+ continue;
+
+ errcode = l_append(l, tmp, addr + base_addr);
+ if (errcode < 0)
+ goto error;
+ continue;
+ }
+ name = duplicate_str(tmp);
+ if (!name) {
+ errcode = -err_no_mem;
+ goto error;
+ }
+
+ /* there was a label so now an address needs to
+ * be found.
+ */
+ errcode = -err_label_addr;
+ for (i += 1; i < t->n; i++) {
+ int errcode_text;
+
+ errcode_text = text_line(t, line, linelen, i);
+ if (errcode_text < 0) {
+ errcode = errcode_text;
+ break;
+ }
+ if (sscanf(line, "%*d %" PRIx64 " %*x %*s", &addr)
+ == 1) {
+ errcode = l_append(l, name, addr + base_addr);
+ break;
+ }
+ }
+ if (errcode == -err_label_addr)
+ fprintf(stderr, "label '%s' has no address\n", name);
+ free(name);
+ if (errcode < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ l_free(l->next);
+ free(l->name);
+ l->next = NULL;
+ l->name = NULL;
+ return errcode;
+}
+
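+/* Checks whether @s denotes a label, i.e. ends in ':'.
+ *
+ * If so, strips the trailing ':' from @s.
+ *
+ * Returns 1 if @s is a label; 0 if it is not; -err_internal if @s is NULL.
+ */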
+int make_label(char *s)
+{
+ size_t n;
+
+ if (bug_on(!s))
+ return -err_internal;
+
+ n = strlen(s);
+ if (n == 0 || s[n-1] != ':')
+ return 0;
+
+ s[n-1] = '\0';
+ return 1;
+}
+
+struct state *st_alloc(void)
+{
+ return calloc(1, sizeof(struct state));
+}
+
+void st_free(struct state *st)
+{
+ if (!st)
+ return;
+
+ free(st->filename);
+ free(st->line);
+ free(st);
+}
+
+int st_print_err(const struct state *st, const char *s, int errcode)
+{
+ if (bug_on(!st))
+ return -err_internal;
+
+ if (bug_on(!(-err_max < errcode && errcode < 0)))
+ return -err_internal;
+
+ if (!s)
+ s = "";
+
+ fprintf(stderr, "%s:%d: error: %s (%s)\n", st->filename, st->n-1, s,
+ errstr[-errcode]);
+
+ return errcode;
+}
+
+/* Sets current @filename, increment (@inc) and line number (@n) in @st.
+ *
+ * Note that @filename, @inc and @n correspond to the yasm .lst file
+ * source file information.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+static int st_set_file(struct state *st, const char *filename, int n, int inc)
+{
+ if (bug_on(!st))
+ return -err_internal;
+
+ if (bug_on(!filename))
+ return -err_internal;
+
+ free(st->filename);
+ st->filename = duplicate_str(filename);
+ if (!st->filename)
+ return -err_no_mem;
+ st->inc = inc;
+ st->n = n;
+ return 0;
+}
+
+/* Sets current line in @st to @s and increases the line number.
+ *
+ * Returns 0 on success; a negative enum errcode otherwise.
+ */
+static int st_update(struct state *st, const char *s)
+{
+ free(st->line);
+ st->line = duplicate_str(s);
+ if (!st->line)
+ return -err_no_mem;
+
+ st->n += st->inc;
+ return 0;
+}
+
+struct pt_directive *pd_alloc(size_t n)
+{
+ struct pt_directive *pd;
+
+ pd = calloc(1, sizeof(*pd));
+ if (!pd)
+ return NULL;
+
+ pd->name = malloc(n);
+ if (!pd->name)
+ goto error;
+
+ pd->payload = malloc(n);
+ if (!pd->payload)
+ goto error;
+
+ pd->nlen = n;
+ pd->plen = n;
+
+ return pd;
+
+error:
+ pd_free(pd);
+ return NULL;
+}
+
+void pd_free(struct pt_directive *pd)
+{
+ if (!pd)
+ return;
+
+ free(pd->name);
+ free(pd->payload);
+ free(pd);
+}
+
+int pd_set(struct pt_directive *pd, enum pt_directive_kind kind,
+ const char *name, const char *payload)
+{
+ if (bug_on(!pd))
+ return -err_internal;
+
+ if (bug_on(!name))
+ return -err_internal;
+
+ if (bug_on(!payload))
+ return -err_internal;
+
+ pd->kind = kind;
+ strncpy(pd->name, name, pd->nlen);
+ if (pd->nlen > 0)
+ pd->name[pd->nlen - 1] = '\0';
+ strncpy(pd->payload, payload, pd->plen);
+ if (pd->plen > 0)
+ pd->payload[pd->plen - 1] = '\0';
+
+ return 0;
+}
+
+/* Magic annotation markers. */
+static const char *pt_marker = "@pt ";
+
+#if defined(FEATURE_SIDEBAND)
+static const char *sb_marker = "@sb ";
+#endif
+
+int pd_parse(struct pt_directive *pd, struct state *st)
+{
+ enum pt_directive_kind kind;
+ char *line, *comment, *openpar, *closepar, *directive, *payload;
+ int errcode;
+ char *c;
+
+ if (bug_on(!pd))
+ return -err_internal;
+
+ if (bug_on(!st))
+ return -err_internal;
+
+
+ line = duplicate_str(st->line);
+ if (!line)
+ return -err_no_mem;
+
+ /* make line lower case. */
+ for (c = line; *c; ++c)
+ *c = (char) tolower(*c);
+
+ /* if the current line is not a comment or contains no magic marker,
+ * -err_no_directive is returned.
+ */
+ errcode = -err_no_directive;
+
+ /* search where the comment begins. */
+ comment = strchr(line, ';');
+
+ /* if there is no comment in the line, we don't have anything to
+ * do.
+ */
+ if (!comment)
+ goto cleanup;
+
+ /* search for @pt marker. */
+ directive = strstr(comment+1, pt_marker);
+ if (directive) {
+ directive += strlen(pt_marker);
+ kind = pdk_pt;
+ } else {
+#if defined(FEATURE_SIDEBAND)
+ /* search for @sb marker. */
+ directive = strstr(comment+1, sb_marker);
+ if (directive) {
+ directive += strlen(sb_marker);
+ kind = pdk_sb;
+ } else
+#endif
+ goto cleanup;
+ }
+
+ /* skip leading whitespace. */
+ while (isspace(*directive))
+ directive += 1;
+
+ /* directive found, now parse the payload. */
+ errcode = 0;
+
+ /* find position of next '(', separating the directive and the
+ * payload.
+ */
+ openpar = strchr(directive, '(');
+ if (!openpar) {
+ errcode = -err_missing_openpar;
+ st_print_err(st, "invalid syntax", errcode);
+ goto cleanup;
+ }
+
+ /* find position of next ')', marking the end of the payload */
+ closepar = strchr(openpar, ')');
+ if (!closepar) {
+ errcode = -err_missing_closepar;
+ st_print_err(st, "invalid syntax", errcode);
+ goto cleanup;
+ }
+
+ /* make "multiple" strings by artifically terminating them with
+ * '\0' then get directive and payload substrings, which will
+ * have leading and trailing whitespace "removed".
+ */
+ *openpar = '\0';
+ *closepar = '\0';
+
+ payload = openpar+1;
+
+ errcode = pd_set(pd, kind, directive, payload);
+
+cleanup:
+ free(line);
+ return errcode;
+}
+
+static const char *bin_suffix = ".bin";
+static const char *lst_suffix = ".lst";
+static const char path_separator = '/';
+enum {
+ max_filename_len = 1024
+};
+
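+/* Allocates a new yasm context for @pttfile.
+ *
+ * The names of the .bin and .lst output files are derived from @pttfile by
+ * stripping any directory prefix and file extension.
+ *
+ * Returns the new context on success; NULL if the allocation failed.
+ */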
+struct yasm *yasm_alloc(const char *pttfile)
+{
+ char *tmp;
+ size_t n;
+ struct yasm *y;
+
+ if (bug_on(!pttfile))
+ return NULL;
+
+ y = calloc(1, sizeof(*y));
+ if (!y)
+ return NULL;
+
+ y->fl = fl_alloc();
+ if (!y->fl)
+ goto error;
+
+ y->st_asm = st_alloc();
+ if (!y->st_asm)
+ goto error;
+
+ y->fileroot = duplicate_str(pttfile);
+ if (!y->fileroot)
+ goto error;
+
+ y->pttfile = duplicate_str(pttfile);
+ if (!y->pttfile)
+ goto error;
+
+ tmp = strrchr(y->fileroot, '.');
+ if (tmp)
+ *tmp = '\0';
+
+ tmp = strrchr(y->fileroot, path_separator);
+ if (tmp) {
+ tmp += 1;
+ memmove(y->fileroot, tmp, strlen(tmp)+1);
+ }
+
+ y->binfile = malloc(strlen(y->fileroot)+strlen(bin_suffix)+1);
+ if (!y->binfile)
+ goto error;
+
+ y->lstfile = malloc(strlen(y->fileroot)+strlen(lst_suffix)+1);
+ if (!y->lstfile)
+ goto error;
+
+ n = strlen(y->fileroot);
+
+ strcpy(y->binfile, y->fileroot);
+ strcpy(y->binfile+n, bin_suffix);
+ strcpy(y->lstfile, y->fileroot);
+ strcpy(y->lstfile+n, lst_suffix);
+
+ y->l = l_alloc();
+ if (!y->l)
+ goto error;
+
+ return y;
+
+error:
+ yasm_free(y);
+ return 0;
+}
+
+static int yasm_run(struct yasm *y)
+{
+ char *argv[] = {
+ "yasm",
+ "<pttfile>",
+ "-f", "bin",
+ "-o", "<binfile>",
+ "-L", "nasm",
+ "-l", "<lstfile>",
+ NULL,
+ };
+
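+ /* replace the placeholder arguments with the actual file names. */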
+ argv[1] = y->pttfile;
+ argv[5] = y->binfile;
+ argv[9] = y->lstfile;
+
+ return run(argv[0], argv);
+}
+
+int yasm_parse(struct yasm *y)
+{
+ int errcode;
+ const struct text *t;
+
+ if (bug_on(!y))
+ return -err_internal;
+
+ errcode = yasm_run(y);
+ if (errcode < 0)
+ goto error;
+
+ errcode = fl_gettext(y->fl, &t, y->lstfile);
+ if (errcode < 0)
+ goto error;
+
+ errcode = parse_yasm_labels(y->l, t);
+ if (errcode < 0)
+ goto error;
+
+error:
+ return errcode;
+}
+
+void yasm_free(struct yasm *y)
+{
+ if (!y)
+ return;
+
+ free(y->fileroot);
+ free(y->pttfile);
+ free(y->lstfile);
+ free(y->binfile);
+ fl_free(y->fl);
+ st_free(y->st_asm);
+ l_free(y->l);
+ free(y);
+}
+
+int yasm_lookup_label(const struct yasm *y, uint64_t *addr,
+ const char *labelname)
+{
+ if (bug_on(!y))
+ return -err_internal;
+
+
+ return l_lookup(y->l, addr, labelname);
+}
+
+static int yasm_advance_next_line(struct yasm *y)
+{
+ enum { slen = 1024 };
+ char s[slen];
+ char filename[max_filename_len];
+ int errcode;
+ int asm_line, asm_inc;
+
+ if (bug_on(!y))
+ return -err_internal;
+
+
+ for (;;) {
+ errcode = fl_getline(y->fl, s, slen, y->lstfile,
+ y->lst_curr_line);
+ /* always advance in lst file. */
+ y->lst_curr_line += 1;
+
+ if (errcode < 0)
+ break;
+
+ /* if the current lst file line is a line directive, set
+ * state information to this file, line and increment
+ * and continue.
+ */
+ if (sscanf(s, "%*d %%line %d+%d %1023[^\r\n]", &asm_line,
+ &asm_inc, filename) == 3) {
+ st_set_file(y->st_asm, filename, asm_line, asm_inc);
+ continue;
+ }
+
+ /* if line number or increment in the previous line
+ * directive is <= 0, the current lst line has no
+ * corresponding line in the source file.
+ */
+ if (y->st_asm->n <= 0 || y->st_asm->inc <= 0)
+ continue;
+
+ /* finally the current line in the lst file can be
+ * correlated to the source file, so we retrieve the
+ * line from it and update the state.
+ */
+ errcode = fl_getline(y->fl, s, slen, y->st_asm->filename,
+ y->st_asm->n-1);
+ st_update(y->st_asm, s);
+ break;
+ }
+
+ return errcode;
+}
+
+int yasm_pd_parse(struct yasm *y, struct pt_directive *pd)
+{
+ return pd_parse(pd, y->st_asm);
+}
+
+int yasm_next_pt_directive(struct yasm *y, struct pt_directive *pd)
+{
+ int errcode;
+
+ for (;;) {
+ errcode = yasm_advance_next_line(y);
+ if (errcode < 0)
+ break;
+
+ errcode = pd_parse(pd, y->st_asm);
+ if (errcode != -err_no_directive)
+ return errcode;
+ }
+ if (errcode == -err_out_of_range)
+ errcode = -err_no_directive;
+
+ return errcode;
+}
+
+int yasm_next_line(struct yasm *y, char *dest, size_t destlen)
+{
+ int errcode;
+
+ if (!destlen)
+ return 0;
+
+ if (bug_on(!dest))
+ return -err_internal;
+
+ errcode = yasm_advance_next_line(y);
+ if (errcode < 0)
+ return errcode;
+
+ strncpy(dest, y->st_asm->line, destlen);
+ dest[destlen-1] = '\0';
+
+ return 0;
+}
+
+int yasm_print_err(const struct yasm *y, const char *s, int errcode)
+{
+ if (bug_on(!y))
+ return -err_internal;
+
+
+ return st_print_err(y->st_asm, s, errcode);
+}
+
+int yasm_lookup_section_label(const struct yasm *y, const char *name,
+ const char *attribute, uint64_t *value)
+{
+ if (bug_on(!y))
+ return -err_internal;
+
+ return lookup_section_label(y->l, name, attribute, value);
+}
diff --git a/pttc/test/src/test_all_directives.ptt b/pttc/test/src/test_all_directives.ptt
new file mode 100644
index 000000000000..a1fc17c7f5d2
--- /dev/null
+++ b/pttc/test/src/test_all_directives.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+org 0x42
+; @pt psb()
+; @pt psbend()
+; @pt pad()
+; @pt ovf()
+; @pt stop()
+; @pt tnt(tnnnt)
+; @pt tnt64(tnntnntnntt)
+; @pt tip(3: 0x1000)
+; @pt tip.pge(2: 0x2000)
+; @pt tip.pgd(1: 0x3000)
+; @pt fup(3: 0x4000)
+; @pt mode.exec(16bit)
+; @pt mode.tsx(begin)
+; @pt pip(0xafafaf)
+; @pt pip(0xafafaf, nr)
+; @pt tsc(12345)
+; @pt cbr(244)
+; @pt tma(0x257, 0x1cd)
+; @pt mtc(0xf0)
+; @pt cyc(0x3)
+; @pt cyc(0xfa3)
+; @pt .exp()
+;line1
+
+;line3
+
+ ; line5 trailing space
+; @pt .exp(extra)
+;a #comment
+;b
+;c
diff --git a/pttc/test/src/test_exp_labels.ptt b/pttc/test/src/test_exp_labels.ptt
new file mode 100644
index 000000000000..f8b9dba8713b
--- /dev/null
+++ b/pttc/test/src/test_exp_labels.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+org 0x4242
+
+; @pt p1:psb()
+; @pt p2:psbend()
+l1:
+nop
+
+l2:
+nop
+
+l_3: nop
+
+; @pt .exp()
+;%l1 # print address of l1
+;(%l1) # print address of l1
+;%l1 %l2 # print address of l1 and l2
+;l1 %l2 # print address of l2
+;%l1 l2 # print address of l1
+;%0l1 # print address of l1 zero padded
+;%l2.0 # print zero
+;(%l2.0) # print zero
+;%l2.1 # print address of l2, only last byte.
+;%l2.2 # print address of l2, only last 2 bytes.
+;%0l2.2 # print address of l2, only last 2 bytes, zero padded.
+;%0l2.3 # print address of l2, last 3 bytes, zero padded.
+
+;%l_3 # print l_3
+
+;%p1 # print packet 1
+;%p2 # print packet 2
+
+;%eos # print eos byte offset
diff --git a/pttc/test/src/test_label_addr.ptt b/pttc/test/src/test_label_addr.ptt
new file mode 100644
index 000000000000..e5ee9049126c
--- /dev/null
+++ b/pttc/test/src/test_label_addr.ptt
@@ -0,0 +1,31 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+bits 64
+org 0x1000
+l1:
+nop
+l2:
diff --git a/ptunit/CMakeLists.txt b/ptunit/CMakeLists.txt
new file mode 100644
index 000000000000..b71b084bf4c2
--- /dev/null
+++ b/ptunit/CMakeLists.txt
@@ -0,0 +1,43 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+set(PTUNIT_FILES
+ src/ptunit.c
+)
+
+if (CMAKE_HOST_UNIX)
+ set(PTUNIT_FILES ${PTUNIT_FILES} src/posix/ptunit_mkfile.c)
+endif (CMAKE_HOST_UNIX)
+
+if (CMAKE_HOST_WIN32)
+ set(PTUNIT_FILES ${PTUNIT_FILES} src/windows/ptunit_mkfile.c)
+endif (CMAKE_HOST_WIN32)
+
+add_library(ptunit STATIC
+ ${PTUNIT_FILES}
+)
+
+add_ptunit_c_test(selftest)
diff --git a/ptunit/include/ptunit.h b/ptunit/include/ptunit.h
new file mode 100644
index 000000000000..7c54aad55486
--- /dev/null
+++ b/ptunit/include/ptunit.h
@@ -0,0 +1,463 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PTUNIT_H
+#define PTUNIT_H
+
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* A source location for reporting unit test fails. */
+struct ptunit_srcloc {
+ /* The source file. */
+ const char *file;
+
+ /* The source line. */
+ uint32_t line;
+};
+
+/* A unit test result type.
+ *
+ * This distinguishes the various potential results of a unit test.
+ */
+enum ptunit_result_type {
+ /* The test has passed. */
+ ptur_passed,
+
+ /* The test has been skipped. */
+ ptur_skipped,
+
+ /* The test failed a signed/unsigned integer comparison. */
+ ptur_failed_signed_int,
+ ptur_failed_unsigned_int,
+
+ /* The test failed a pointer comparison. */
+ ptur_failed_pointer,
+
+ /* The test failed a string comparison. */
+ ptur_failed_str
+};
+
+/* A unit test result.
+ *
+ * We separate test execution and result reporting. A unit test function
+ * returns a structured result that can later be used for reporting.
+ */
+struct ptunit_failed_signed_int {
+ /* The expression that failed. */
+ const char *expr;
+
+ /* A string representation of the comparison operation. */
+ const char *cmp;
+
+ /* The expected value. */
+ int64_t expected;
+
+ /* The actual value. */
+ int64_t actual;
+};
+
+struct ptunit_failed_unsigned_int {
+ /* The expression that failed. */
+ const char *expr;
+
+ /* A string representation of the comparison operation. */
+ const char *cmp;
+
+ /* The expected value. */
+ uint64_t expected;
+
+ /* The actual value. */
+ uint64_t actual;
+};
+
+struct ptunit_failed_pointer {
+ /* The expression that failed. */
+ const char *expr;
+
+ /* A string representation of the comparison operation. */
+ const char *cmp;
+
+ /* The expected value. */
+ const void *expected;
+
+ /* The actual value. */
+ const void *actual;
+};
+
+struct ptunit_failed_str {
+ /* The expression that failed. */
+ const char *expr;
+
+ /* A string representation of the comparison operation. */
+ const char *cmp;
+
+ /* The expected value. */
+ char *expected;
+
+ /* The actual value. */
+ char *actual;
+};
+
+struct ptunit_result {
+ /* The test result type. */
+ enum ptunit_result_type type;
+
+ /* Test result details depending on the result type. */
+ struct {
+ /* The source location of the fail. */
+ struct ptunit_srcloc where;
+
+ union {
+ struct ptunit_failed_signed_int signed_int;
+ struct ptunit_failed_unsigned_int unsigned_int;
+ struct ptunit_failed_pointer pointer;
+ struct ptunit_failed_str str;
+ } variant;
+ } failed;
+};
+
+/* A unit test function. */
+typedef struct ptunit_result (*ptunit_tfun_t)(void);
+
+/* A unit test.
+ *
+ * This is used for logging and reporting.
+ *
+ * It is not used for running tests or even for storing tests to be run at a
+ * later time.
+ */
+struct ptunit_test {
+ /* The test name. */
+ const char *name;
+
+ /* The optional test arguments. */
+ const char *args;
+
+ /* The test result. */
+ struct ptunit_result result;
+};
+
+/* A unit test suite.
+ *
+ * This is a simple summary of all tests that have been run.
+ */
+struct ptunit_suite {
+ /* An optional suite name. */
+ const char *name;
+
+ /* The number of total tests. */
+ uint32_t nr_tests;
+
+ /* The number of tests that have been skipped. */
+ uint32_t nr_skips;
+
+ /* The number of tests that have failed. */
+ uint32_t nr_fails;
+};
+
+/* Create a unit test source location. */
+extern struct ptunit_srcloc ptunit_mk_srcloc(const char *file, uint32_t line);
+
+#define ptu_here() ptunit_mk_srcloc(__FILE__, __LINE__)
+
+
+/* Create unit test passed and not run results. */
+extern struct ptunit_result ptunit_mk_passed(void);
+extern struct ptunit_result ptunit_mk_skipped(void);
+
+/* Create a unit test failed signed int result. */
+extern struct ptunit_result ptunit_mk_failed_signed_int(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc,
+ int64_t actual,
+ int64_t expected);
+
+#define ptunit_int_cmp(A, E, C) \
+ do { \
+ int64_t a = (A), e = (E); \
+ \
+ if (!(a C e)) \
+ return ptunit_mk_failed_signed_int(#A #C #E, #C, \
+ ptu_here(), \
+ a, e); \
+ } while (0)
+
+
+/* Create a unit test failed unsigned int result. */
+extern struct ptunit_result ptunit_mk_failed_unsigned_int(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc,
+ uint64_t actual,
+ uint64_t expected);
+
+#define ptunit_uint_cmp(A, E, C) \
+ do { \
+ uint64_t a = (A), e = (E); \
+ \
+ if (!(a C e)) \
+ return ptunit_mk_failed_unsigned_int(#A #C #E, #C, \
+ ptu_here(), \
+ a, e); \
+ } while (0)
+
+
+/* Create a unit test failed pointer result. */
+extern struct ptunit_result ptunit_mk_failed_pointer(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc,
+ const void *actual,
+ const void *expected);
+
+#define ptunit_ptr_cmp(A, E, C) \
+ do { \
+ const void *a = (A), *e = (E); \
+ \
+ if (!(a C e)) \
+ return ptunit_mk_failed_pointer(#A #C #E, #C, \
+ ptu_here(), \
+ a, e); \
+ } while (0)
+
+
+/* Create a unit test failed string result. */
+extern struct ptunit_result ptunit_mk_failed_str(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc,
+ const char *actual,
+ const char *expected);
+
+#define ptunit_str_cmp(A, E, C) \
+ do { \
+ const char *a = (A), *e = (E); \
+ \
+ if (!a || !e || !(strcmp(a, e) C 0)) \
+ return ptunit_mk_failed_str(#A "~"#C #E, "~"#C, \
+ ptu_here(), \
+ a, e); \
+ } while (0)
+
+
+/* Run a sub-unit test; return on fail. */
+
+#define ptunit_subtest(T, ...) \
+ do { \
+ struct ptunit_result result; \
+ \
+ result = (T)(__VA_ARGS__); \
+ if (result.type != ptur_passed) \
+ return result; \
+ } while (0)
+
+
+/* Run a sub-unit test; return on fail from here. */
+
+#define ptunit_check(T, ...) \
+ do { \
+ struct ptunit_result result; \
+ \
+ result = (T)(__VA_ARGS__); \
+ if (result.type != ptur_passed) { \
+ result.failed.where = ptu_here(); \
+ return result; \
+ } \
+ } while (0)
+
+
+/* Create a unit test. */
+extern struct ptunit_test ptunit_mk_test(const char *name, const char *args);
+
+/* Destroy a unit test. */
+extern void ptunit_fini_test(struct ptunit_test *);
+
+/* Create a unit test suite. */
+extern struct ptunit_suite ptunit_mk_suite(int argc, char **argv);
+
+/* Log a unit test result.
+ *
+ * This may also report test fails depending on the configuration.
+ */
+extern void ptunit_log_test(struct ptunit_suite *, const struct ptunit_test *);
+
+/* Print a summary report for a unit test suite.
+ *
+ * Returns the number of failed tests (capped to fit into an int) on success.
+ * Returns -1 if @suite is NULL.
+ */
+extern int ptunit_report(const struct ptunit_suite *suite);
+
+/* Run a single simple unit test and log its result. */
+
+#define ptunit_run(S, T) \
+ do { \
+ struct ptunit_test test; \
+ \
+ test = ptunit_mk_test(#T, NULL); \
+ test.result = (T)(); \
+ \
+ ptunit_log_test(S, &test); \
+ ptunit_fini_test(&test); \
+ } while (0)
+
+
+/* Run a single parameterized unit test and log its result. */
+
+#define ptunit_run_p(S, T, ...) \
+ do { \
+ struct ptunit_test test; \
+ \
+ test = ptunit_mk_test(#T, #__VA_ARGS__); \
+ test.result = (T)(__VA_ARGS__); \
+ \
+ ptunit_log_test(S, &test); \
+ ptunit_fini_test(&test); \
+ } while (0)
+
+
+/* Run a single unit test with fixture and an explicit argument list.
+ *
+ * The first argument in the argument list is typically the fixture.
+ */
+
+#define ptunit_frun(R, T, F, ...) \
+ do { \
+ struct ptunit_result *pr = &(R); \
+ \
+ pr->type = ptur_passed; \
+ if ((F)->init) \
+ *pr = (F)->init(F); \
+ \
+ if (pr->type == ptur_passed) { \
+ *pr = (T)(__VA_ARGS__); \
+ \
+ if ((F)->fini) { \
+ if (pr->type == ptur_passed) \
+ *pr = (F)->fini(F); \
+ else \
+ (void) (F)->fini(F); \
+ } \
+ } \
+ } while (0)
+
+
+/* Run a single unit test with fixture and log its result. */
+
+#define ptunit_run_f(S, T, F) \
+ do { \
+ struct ptunit_test test; \
+ \
+ test = ptunit_mk_test(#T, #F); \
+ \
+ ptunit_frun(test.result, T, &(F), &(F)); \
+ \
+ ptunit_log_test(S, &test); \
+ ptunit_fini_test(&test); \
+ } while (0)
+
+
+/* Run a single parameterized unit test with fixture and log its result. */
+
+#define ptunit_run_fp(S, T, F, ...) \
+ do { \
+ struct ptunit_test test; \
+ \
+ test = ptunit_mk_test(#T, #F ", " #__VA_ARGS__); \
+ \
+ ptunit_frun(test.result, T, &(F), &(F), __VA_ARGS__); \
+ \
+ ptunit_log_test(S, &test); \
+ ptunit_fini_test(&test); \
+ } while (0)
+
+
+
+/* The actual macros to be used in unit tests.
+ *
+ * Do not use the above ptunit_ macros directly.
+ */
+
+#define ptu_int_eq(A, E) ptunit_int_cmp(A, E, ==)
+#define ptu_int_ne(A, E) ptunit_int_cmp(A, E, !=)
+#define ptu_int_gt(A, E) ptunit_int_cmp(A, E, >)
+#define ptu_int_ge(A, E) ptunit_int_cmp(A, E, >=)
+#define ptu_int_lt(A, E) ptunit_int_cmp(A, E, <)
+#define ptu_int_le(A, E) ptunit_int_cmp(A, E, <=)
+
+#define ptu_uint_eq(A, E) ptunit_uint_cmp(A, E, ==)
+#define ptu_uint_ne(A, E) ptunit_uint_cmp(A, E, !=)
+#define ptu_uint_gt(A, E) ptunit_uint_cmp(A, E, >)
+#define ptu_uint_ge(A, E) ptunit_uint_cmp(A, E, >=)
+#define ptu_uint_lt(A, E) ptunit_uint_cmp(A, E, <)
+#define ptu_uint_le(A, E) ptunit_uint_cmp(A, E, <=)
+
+#define ptu_ptr_eq(A, E) ptunit_ptr_cmp(A, E, ==)
+#define ptu_ptr_ne(A, E) ptunit_ptr_cmp(A, E, !=)
+#define ptu_ptr_gt(A, E) ptunit_ptr_cmp(A, E, >)
+#define ptu_ptr_ge(A, E) ptunit_ptr_cmp(A, E, >=)
+#define ptu_ptr_lt(A, E) ptunit_ptr_cmp(A, E, <)
+#define ptu_ptr_le(A, E) ptunit_ptr_cmp(A, E, <=)
+#define ptu_null(A) ptunit_ptr_cmp(A, NULL, ==)
+#define ptu_ptr(A) ptunit_ptr_cmp(A, NULL, !=)
+
+#define ptu_str_eq(A, E) ptunit_str_cmp(A, E, ==)
+#define ptu_str_ne(A, E) ptunit_str_cmp(A, E, !=)
+
+/* Indicate that a unit test passed. */
+#define ptu_passed() ptunit_mk_passed()
+
+/* Skip a unit test. */
+#define ptu_skipped() ptunit_mk_skipped()
+
+/* Run a sub-unit test; return on fail. */
+#define ptu_test(T, ...) ptunit_subtest(T, __VA_ARGS__)
+
+/* Run a sub-unit test; return on fail from here. */
+#define ptu_check(T, ...) ptunit_check(T, __VA_ARGS__)
+
+/* Run a single unit test. */
+#define ptu_run(S, T) ptunit_run(&(S), T)
+
+/* Run a single parameterized unit test. */
+#define ptu_run_p(S, T, ...) ptunit_run_p(&(S), T, __VA_ARGS__)
+
+/* Run a single unit test with fixture. */
+#define ptu_run_f(S, T, F) ptunit_run_f(&(S), T, F)
+
+/* Run a single parameterized unit test with fixture. */
+#define ptu_run_fp(S, T, F, ...) ptunit_run_fp(&(S), T, F, __VA_ARGS__)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PTUNIT_H */
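A minimal consumer of this header, sketched for orientation (test and suite names are illustrative):

#include "ptunit.h"

/* A trivial test; the ptu_* comparison macros return a failure
 * result from the test on mismatch.
 */
static struct ptunit_result one_plus_one(void)
{
	int sum = 1 + 1;

	ptu_int_eq(sum, 2);

	return ptu_passed();
}

int main(int argc, char **argv)
{
	struct ptunit_suite suite;

	suite = ptunit_mk_suite(argc, argv);

	ptu_run(suite, one_plus_one);

	/* Prints the summary and returns the number of failed tests. */
	return ptunit_report(&suite);
}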
diff --git a/ptunit/include/ptunit_mkfile.h b/ptunit/include/ptunit_mkfile.h
new file mode 100644
index 000000000000..9eadabeca9e8
--- /dev/null
+++ b/ptunit/include/ptunit_mkfile.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PTUNIT_MKFILE_H
+#define PTUNIT_MKFILE_H
+
+#include <stdio.h>
+
+
+/* Create a temporary file for unit testing.
+ *
+ * Creates a new file and opens it with @mode. On success, provides the file
+ * struct and file name in @file and @filename respectively.
+ *
+ * The @file needs to be closed and the @filename needs to be freed after use.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @file or @filename is NULL.
+ * Returns -pte_nomem if @filename can't be allocated.
+ */
+int ptunit_mkfile(FILE **file, char **filename, const char *mode);
+
+#endif /* PTUNIT_MKFILE_H */
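A sketch of the intended use inside a unit test (the mode and the test name are illustrative):

#include "ptunit_mkfile.h"
#include "ptunit.h"

#include <stdio.h>
#include <stdlib.h>

static struct ptunit_result tmpfile_roundtrip(void)
{
	FILE *file;
	char *filename;
	int errcode;

	errcode = ptunit_mkfile(&file, &filename, "wb");
	ptu_int_eq(errcode, 0);

	/* ... write to @file and re-open @filename as needed ... */

	fclose(file);
	free(filename);

	return ptu_passed();
}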
diff --git a/ptunit/include/ptunit_threads.h b/ptunit/include/ptunit_threads.h
new file mode 100644
index 000000000000..557e9e501b6e
--- /dev/null
+++ b/ptunit/include/ptunit_threads.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PTUNIT_THREADS_H
+#define PTUNIT_THREADS_H
+
+#include "ptunit.h"
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+
+/* The maximal number of threads. */
+enum {
+ ptu_thrd_max = 16
+};
+
+/* A test fixture component providing threading support. */
+struct ptunit_thrd_fixture {
+#if defined(FEATURE_THREADS)
+
+ /* An array of threads created by ptunit_thrd_create(). */
+ thrd_t threads[ptu_thrd_max];
+
+	/* A lock for protecting data of the enclosing fixture; the thread
+	 * fixture itself does not use it.
+	 */
+ mtx_t lock;
+
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The actual number of created threads. */
+ uint8_t nthreads;
+
+ /* The result of joined threads. */
+ int result[ptu_thrd_max];
+};
+
+
+static inline struct ptunit_result
+ptunit_thrd_init(struct ptunit_thrd_fixture *tfix)
+{
+ ptu_ptr(tfix);
+
+ memset(tfix, 0, sizeof(*tfix));
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_init(&tfix->lock, mtx_plain);
+ ptu_int_eq(errcode, thrd_success);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return ptu_passed();
+}
+
+static inline struct ptunit_result
+ptunit_thrd_fini(struct ptunit_thrd_fixture *tfix)
+{
+ ptu_ptr(tfix);
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd, errcode[ptu_thrd_max];
+
+ for (thrd = 0; thrd < tfix->nthreads; ++thrd)
+ errcode[thrd] = thrd_join(&tfix->threads[thrd],
+ &tfix->result[thrd]);
+
+ mtx_destroy(&tfix->lock);
+
+ for (thrd = 0; thrd < tfix->nthreads; ++thrd)
+ ptu_int_eq(errcode[thrd], thrd_success);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return ptu_passed();
+}
+
+#if defined(FEATURE_THREADS)
+
+static inline struct ptunit_result
+ptunit_thrd_create(struct ptunit_thrd_fixture *tfix, int (*worker)(void *),
+ void *arg)
+{
+ int errcode;
+
+ ptu_ptr(tfix);
+
+ errcode = thrd_create(&tfix->threads[tfix->nthreads++], worker, arg);
+ ptu_int_eq(errcode, thrd_success);
+
+ return ptu_passed();
+}
+
+#endif /* defined(FEATURE_THREADS) */
+
+static inline struct ptunit_result
+ptunit_thrd_lock(struct ptunit_thrd_fixture *tfix)
+{
+ ptu_ptr(tfix);
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&tfix->lock);
+ ptu_int_eq(errcode, thrd_success);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return ptu_passed();
+}
+
+static inline struct ptunit_result
+ptunit_thrd_unlock(struct ptunit_thrd_fixture *tfix)
+{
+ ptu_ptr(tfix);
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&tfix->lock);
+ ptu_int_eq(errcode, thrd_success);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return ptu_passed();
+}
+
+#endif /* PTUNIT_THREADS_H */
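A sketch of how this component is meant to be embedded in a test's own fixture; ptu_test() propagates failures from the helpers, and ptunit_thrd_create() is only available with FEATURE_THREADS (fixture, worker, and test names as well as the number of workers are illustrative):

#include "ptunit_threads.h"

#include <stdint.h>

struct counter_fixture {
	/* The threading component. */
	struct ptunit_thrd_fixture thrd;

	/* A counter shared between workers. */
	uint64_t counter;

	/* Standard initialization and finalization functions. */
	struct ptunit_result (*init)(struct counter_fixture *);
	struct ptunit_result (*fini)(struct counter_fixture *);
};

static struct ptunit_result cfix_init(struct counter_fixture *cfix)
{
	cfix->counter = 0ull;

	ptu_test(ptunit_thrd_init, &cfix->thrd);

	return ptu_passed();
}

static struct ptunit_result cfix_fini(struct counter_fixture *cfix)
{
	int thrd;

	/* Joins all created workers and fills in their results. */
	ptu_test(ptunit_thrd_fini, &cfix->thrd);

	for (thrd = 0; thrd < cfix->thrd.nthreads; ++thrd)
		ptu_int_eq(cfix->thrd.result[thrd], 0);

	return ptu_passed();
}

#if defined(FEATURE_THREADS)

static int worker(void *arg)
{
	struct counter_fixture *cfix = (struct counter_fixture *) arg;
	struct ptunit_result status;

	status = ptunit_thrd_lock(&cfix->thrd);
	if (status.type != ptur_passed)
		return -1;

	cfix->counter += 1;

	status = ptunit_thrd_unlock(&cfix->thrd);
	if (status.type != ptur_passed)
		return -1;

	return 0;
}

#endif /* defined(FEATURE_THREADS) */

static struct ptunit_result count(struct counter_fixture *cfix)
{
#if defined(FEATURE_THREADS)
	int num;

	for (num = 0; num < 2; ++num)
		ptu_test(ptunit_thrd_create, &cfix->thrd, worker, cfix);
#else
	(void) cfix;
#endif /* defined(FEATURE_THREADS) */

	return ptu_passed();
}

Such a test would then be run as ptu_run_f(suite, count, cfix) with cfix.init and cfix.fini pointing at the helpers above.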
diff --git a/ptunit/src/posix/ptunit_mkfile.c b/ptunit/src/posix/ptunit_mkfile.c
new file mode 100644
index 000000000000..c602fd556d2f
--- /dev/null
+++ b/ptunit/src/posix/ptunit_mkfile.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit_mkfile.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+
+int ptunit_mkfile(FILE **pfile, char **pfilename, const char *mode)
+{
+ FILE *file;
+ const char *tmpdir;
+ const char *tmpfile;
+ char template[256], *filename;
+ int fd, len;
+
+ tmpfile = "ptunit-tmp-XXXXXX";
+ tmpdir = getenv("TMP");
+ if (!tmpdir || !tmpdir[0])
+ tmpdir = "/tmp";
+
+ len = snprintf(template, sizeof(template), "%s/%s", tmpdir, tmpfile);
+ if (len < 0)
+ return -pte_not_supported;
+
+ /* We must not truncate the template. */
+ if (sizeof(template) <= (size_t) len)
+ return -pte_not_supported;
+
+ fd = mkstemp(template);
+ if (fd == -1)
+ return -pte_not_supported;
+
+ file = fdopen(fd, mode);
+ if (!file) {
+ close(fd);
+ return -pte_not_supported;
+ }
+
+ filename = strdup(template);
+ if (!filename) {
+ fclose(file);
+ return -pte_nomem;
+ }
+
+ *pfile = file;
+ *pfilename = filename;
+
+ return 0;
+}
diff --git a/ptunit/src/ptunit.c b/ptunit/src/ptunit.c
new file mode 100644
index 000000000000..e4a5b5a72731
--- /dev/null
+++ b/ptunit/src/ptunit.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+
+
+struct ptunit_srcloc ptunit_mk_srcloc(const char *file, uint32_t line)
+{
+ struct ptunit_srcloc srcloc;
+
+ srcloc.file = file;
+ srcloc.line = line;
+
+ return srcloc;
+}
+
+struct ptunit_result ptunit_mk_failed_signed_int(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc where,
+ int64_t actual,
+ int64_t expected)
+{
+ struct ptunit_result result;
+
+ result.type = ptur_failed_signed_int;
+ result.failed.where = where;
+ result.failed.variant.signed_int.expr = expr;
+ result.failed.variant.signed_int.cmp = cmp;
+ result.failed.variant.signed_int.expected = expected;
+ result.failed.variant.signed_int.actual = actual;
+
+ return result;
+}
+
+struct ptunit_result ptunit_mk_failed_unsigned_int(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc where,
+ uint64_t actual,
+ uint64_t expected)
+{
+ struct ptunit_result result;
+
+ result.type = ptur_failed_unsigned_int;
+ result.failed.where = where;
+ result.failed.variant.unsigned_int.expr = expr;
+ result.failed.variant.unsigned_int.cmp = cmp;
+ result.failed.variant.unsigned_int.expected = expected;
+ result.failed.variant.unsigned_int.actual = actual;
+
+ return result;
+}
+
+struct ptunit_result ptunit_mk_failed_pointer(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc where,
+ const void *actual,
+ const void *expected)
+{
+ struct ptunit_result result;
+
+ result.type = ptur_failed_pointer;
+ result.failed.where = where;
+ result.failed.variant.pointer.expr = expr;
+ result.failed.variant.pointer.cmp = cmp;
+ result.failed.variant.pointer.expected = expected;
+ result.failed.variant.pointer.actual = actual;
+
+ return result;
+}
+
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ str = "(null)";
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ strncpy(dup, str, len);
+ dup[len] = 0;
+
+ return dup;
+}
+
+struct ptunit_result ptunit_mk_failed_str(const char *expr,
+ const char *cmp,
+ struct ptunit_srcloc where,
+ const char *actual,
+ const char *expected)
+{
+ struct ptunit_result result;
+
+ result.type = ptur_failed_str;
+ result.failed.where = where;
+ result.failed.variant.str.expr = expr;
+ result.failed.variant.str.cmp = cmp;
+ result.failed.variant.str.expected = dupstr(expected);
+ result.failed.variant.str.actual = dupstr(actual);
+
+ return result;
+}
+
+struct ptunit_result ptunit_mk_passed(void)
+{
+ struct ptunit_result result;
+
+ memset(&result, 0, sizeof(result));
+ result.type = ptur_passed;
+
+ return result;
+}
+
+struct ptunit_result ptunit_mk_skipped(void)
+{
+ struct ptunit_result result;
+
+ memset(&result, 0, sizeof(result));
+ result.type = ptur_skipped;
+
+ return result;
+}
+
+struct ptunit_test ptunit_mk_test(const char *name, const char *args)
+{
+ struct ptunit_test test;
+
+ test.name = name;
+ test.args = args;
+ test.result = ptunit_mk_skipped();
+
+ return test;
+}
+
+void ptunit_fini_test(struct ptunit_test *test)
+{
+ if (!test)
+ return;
+
+ switch (test->result.type) {
+ case ptur_skipped:
+ case ptur_passed:
+ case ptur_failed_signed_int:
+ case ptur_failed_unsigned_int:
+ case ptur_failed_pointer:
+ break;
+
+ case ptur_failed_str:
+ free(test->result.failed.variant.str.expected);
+ free(test->result.failed.variant.str.actual);
+ break;
+ }
+}
+
+struct ptunit_suite ptunit_mk_suite(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ memset(&suite, 0, sizeof(suite));
+
+ if (argc && argv)
+ suite.name = argv[0];
+ return suite;
+}
+
+static void ptunit_print_test(const struct ptunit_test *test)
+{
+ fprintf(stderr, "%s", test->name);
+
+ if (test->args)
+ fprintf(stderr, "(%s)", test->args);
+
+ fprintf(stderr, ": ");
+}
+
+static const char *basename(const char *file)
+{
+ const char *base;
+
+ if (!file)
+ return NULL;
+
+ for (base = file + strlen(file); base != file; base -= 1) {
+ char ch;
+
+ ch = base[-1];
+ if ((ch == '/') || (ch == '\\'))
+ break;
+ }
+
+ return base;
+}
+
+static void ptunit_print_srcloc(const struct ptunit_test *test)
+{
+ const char *file;
+
+ switch (test->result.type) {
+ case ptur_passed:
+ case ptur_skipped:
+ fprintf(stderr, "n/a: ");
+ break;
+
+ case ptur_failed_signed_int:
+ case ptur_failed_unsigned_int:
+ case ptur_failed_pointer:
+ case ptur_failed_str:
+ file = basename(test->result.failed.where.file);
+ if (!file)
+ file = "<unknown>";
+
+ fprintf(stderr, "%s:%" PRIu32 ": ", file,
+ test->result.failed.where.line);
+ break;
+ }
+}
+
+static void ptunit_report_test(const struct ptunit_test *test)
+{
+ switch (test->result.type) {
+ case ptur_skipped:
+ case ptur_passed:
+ return;
+
+ case ptur_failed_signed_int:
+ ptunit_print_test(test);
+ ptunit_print_srcloc(test);
+ fprintf(stderr, "%s [%" PRId64 "%s%" PRId64 "] failed.\n",
+ test->result.failed.variant.signed_int.expr,
+ test->result.failed.variant.signed_int.actual,
+ test->result.failed.variant.signed_int.cmp,
+ test->result.failed.variant.signed_int.expected);
+ return;
+
+ case ptur_failed_unsigned_int:
+ ptunit_print_test(test);
+ ptunit_print_srcloc(test);
+ fprintf(stderr, "%s [0x%" PRIx64 "%s0x%" PRIx64 "] failed.\n",
+ test->result.failed.variant.unsigned_int.expr,
+ test->result.failed.variant.unsigned_int.actual,
+ test->result.failed.variant.unsigned_int.cmp,
+ test->result.failed.variant.unsigned_int.expected);
+ return;
+
+ case ptur_failed_pointer:
+ ptunit_print_test(test);
+ ptunit_print_srcloc(test);
+ fprintf(stderr, "%s [%p%s%p] failed.\n",
+ test->result.failed.variant.pointer.expr,
+ test->result.failed.variant.pointer.actual,
+ test->result.failed.variant.pointer.cmp,
+ test->result.failed.variant.pointer.expected);
+ return;
+
+ case ptur_failed_str:
+ ptunit_print_test(test);
+ ptunit_print_srcloc(test);
+ fprintf(stderr, "%s [%s%s%s] failed.\n",
+ test->result.failed.variant.str.expr,
+ test->result.failed.variant.str.actual,
+ test->result.failed.variant.str.cmp,
+ test->result.failed.variant.str.expected);
+ return;
+ }
+
+ ptunit_print_test(test);
+ fprintf(stderr, "bad result type: 0x%" PRIx32 ".\n", test->result.type);
+}
+
+void ptunit_log_test(struct ptunit_suite *suite,
+ const struct ptunit_test *test)
+{
+ if (!test)
+ return;
+
+ if (suite) {
+ suite->nr_tests += 1;
+
+ if (test->result.type == ptur_skipped)
+ suite->nr_skips += 1;
+ else if (test->result.type != ptur_passed)
+ suite->nr_fails += 1;
+ }
+
+ ptunit_report_test(test);
+}
+
+int ptunit_report(const struct ptunit_suite *suite)
+{
+ if (!suite)
+ return -1;
+
+ if (suite->name)
+ fprintf(stdout, "%s: ", suite->name);
+
+ fprintf(stdout,
+ "tests: %" PRIu32 ", passes: %" PRIu32 ", fails: %" PRIu32,
+ suite->nr_tests,
+ suite->nr_tests - (suite->nr_fails + suite->nr_skips),
+ suite->nr_fails);
+
+ if (suite->nr_skips)
+ fprintf(stdout, " (skipped: %" PRIu32 ")", suite->nr_skips);
+
+ fprintf(stdout, "\n");
+
+ if (INT32_MAX < suite->nr_fails)
+ return INT32_MAX;
+
+ return (int) suite->nr_fails;
+}
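For reference, the formats above produce one line per failed test on stderr followed by a one-line summary on stdout. With placeholder test names, file name, line number, and counts, this looks roughly like:

    some_test: some_file.c:42: zero==one [0==1] failed.
    ./some-suite: tests: 4, passes: 2, fails: 1 (skipped: 1)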
diff --git a/ptunit/src/windows/ptunit_mkfile.c b/ptunit/src/windows/ptunit_mkfile.c
new file mode 100644
index 000000000000..020b37095a51
--- /dev/null
+++ b/ptunit/src/windows/ptunit_mkfile.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit_mkfile.h"
+
+#include "intel-pt.h"
+
+#include <windows.h>
+#include <string.h>
+
+
+int ptunit_mkfile(FILE **pfile, char **pfilename, const char *mode)
+{
+ char dirbuffer[MAX_PATH], buffer[MAX_PATH], *filename;
+ const char *dirname;
+ FILE *file;
+ DWORD dirlen;
+ UINT status;
+
+ /* We only support char-based strings. */
+ if (sizeof(TCHAR) != sizeof(char))
+ return -pte_not_supported;
+
+ dirname = dirbuffer;
+ dirlen = GetTempPath(sizeof(dirbuffer), dirbuffer);
+ if (!dirlen || dirlen >= sizeof(dirbuffer))
+ dirname = ".";
+
+ status = GetTempFileName(dirname, "ptunit-tmp-", 0, buffer);
+ if (!status)
+ return -pte_not_supported;
+
+ file = fopen(buffer, mode);
+ if (!file)
+ return -pte_not_supported;
+
+ filename = _strdup(buffer);
+ if (!filename) {
+ fclose(file);
+ return -pte_nomem;
+ }
+
+ *pfile = file;
+ *pfilename = filename;
+
+ return 0;
+}
diff --git a/ptunit/test/src/ptunit-selftest.c b/ptunit/test/src/ptunit-selftest.c
new file mode 100644
index 000000000000..ed829c28df1c
--- /dev/null
+++ b/ptunit/test/src/ptunit-selftest.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include <stdlib.h>
+
+
+static struct ptunit_result cmp_pass(void)
+{
+ int zero = 0, one = 1, neg = -1;
+ const char *szero = "zero", *sone = "one", *null = NULL;
+
+ ptu_int_eq(zero, 0);
+ ptu_int_ne(zero, one);
+ ptu_int_lt(neg, 0);
+ ptu_int_gt(zero, neg);
+
+ ptu_uint_eq(zero, 0);
+ ptu_uint_ne(zero, one);
+ ptu_uint_lt(zero, one);
+ ptu_uint_gt(neg, one);
+
+ ptu_ptr_eq(szero, szero);
+ ptu_ptr_ne(szero, sone);
+ ptu_null(null);
+ ptu_ptr(szero);
+
+ ptu_str_eq(szero, szero);
+ ptu_str_ne(szero, sone);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result int_eq_fail(void)
+{
+ int zero = 0, one = 1;
+
+ ptu_int_eq(zero, one);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result int_fail(void)
+{
+ struct ptunit_result result;
+
+ result = int_eq_fail();
+
+ ptu_uint_eq(result.type, ptur_failed_signed_int);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.signed_int.expr, "zero==one");
+ ptu_str_eq(result.failed.variant.signed_int.cmp, "==");
+ ptu_int_eq(result.failed.variant.signed_int.expected, 1);
+ ptu_int_eq(result.failed.variant.signed_int.actual, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result uint_eq_fail(void)
+{
+ uint16_t zero = 0, one = 1;
+
+ ptu_uint_eq(zero, one);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result uint_fail(void)
+{
+ struct ptunit_result result;
+
+ result = uint_eq_fail();
+
+ ptu_uint_eq(result.type, ptur_failed_unsigned_int);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.unsigned_int.expr, "zero==one");
+ ptu_str_eq(result.failed.variant.unsigned_int.cmp, "==");
+ ptu_int_eq(result.failed.variant.unsigned_int.expected, 1);
+ ptu_int_eq(result.failed.variant.unsigned_int.actual, 0);
+
+ return ptu_passed();
+}
+
+static int i, j, *pi = &i, *null;
+
+static struct ptunit_result ptr_eq_fail(void)
+{
+ ptu_ptr_eq(pi, &j);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result ptr_fail(void)
+{
+ struct ptunit_result result;
+
+ result = ptr_eq_fail();
+
+ ptu_uint_eq(result.type, ptur_failed_pointer);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.pointer.expr, "pi==&j");
+ ptu_str_eq(result.failed.variant.pointer.cmp, "==");
+ ptu_ptr_eq(result.failed.variant.pointer.expected, &j);
+ ptu_ptr_eq(result.failed.variant.pointer.actual, &i);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptr_null_fail(void)
+{
+ ptu_null(pi);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result null_fail(void)
+{
+ struct ptunit_result result;
+
+ result = ptr_null_fail();
+
+ ptu_uint_eq(result.type, ptur_failed_pointer);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.pointer.expr, "pi==NULL");
+ ptu_str_eq(result.failed.variant.pointer.cmp, "==");
+ ptu_ptr_eq(result.failed.variant.pointer.expected, NULL);
+ ptu_ptr_eq(result.failed.variant.pointer.actual, &i);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptr_check_fail(void)
+{
+ ptu_ptr(null);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result check_fail(void)
+{
+ struct ptunit_result result;
+
+ result = ptr_check_fail();
+
+ ptu_uint_eq(result.type, ptur_failed_pointer);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.pointer.expr, "null!=NULL");
+ ptu_str_eq(result.failed.variant.pointer.cmp, "!=");
+ ptu_ptr_eq(result.failed.variant.pointer.expected, NULL);
+ ptu_ptr_eq(result.failed.variant.pointer.actual, null);
+
+ return ptu_passed();
+}
+
+/* A unit test fixture providing a unit test struct and cleaning it up. */
+struct test_fixture {
+ /* A unit test. */
+ struct ptunit_test test;
+
+ /* Standard initialization and finalization functions. */
+ struct ptunit_result (*init)(struct test_fixture *);
+ struct ptunit_result (*fini)(struct test_fixture *);
+};
+
+static struct ptunit_result init_test_fixture(struct test_fixture *tfix)
+{
+ tfix->test = ptunit_mk_test(NULL, NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_test_fixture(struct test_fixture *tfix)
+{
+ ptunit_fini_test(&tfix->test);
+
+ return ptu_passed();
+}
+
+static const char *sfoo = "foo", *sbar = "bar", *snull;
+
+static struct ptunit_result str_eq_fail(void)
+{
+ ptu_str_eq(sfoo, sbar);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result str_fail(struct test_fixture *tfix)
+{
+ struct ptunit_result *result = &tfix->test.result;
+
+ *result = str_eq_fail();
+
+ ptu_uint_eq(result->type, ptur_failed_str);
+ ptu_str_eq(result->failed.where.file, __FILE__);
+ ptu_uint_lt(result->failed.where.line, __LINE__);
+ ptu_str_eq(result->failed.variant.str.expr, "sfoo~==sbar");
+ ptu_str_eq(result->failed.variant.str.cmp, "~==");
+ ptu_str_eq(result->failed.variant.str.expected, "bar");
+ ptu_str_eq(result->failed.variant.str.actual, "foo");
+
+ return ptu_passed();
+}
+
+static struct ptunit_result str_eq_null(void)
+{
+ ptu_str_eq(snull, sbar);
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result str_null(struct test_fixture *tfix)
+{
+ struct ptunit_result *result = &tfix->test.result;
+
+ *result = str_eq_null();
+
+ ptu_uint_eq(result->type, ptur_failed_str);
+ ptu_str_eq(result->failed.where.file, __FILE__);
+ ptu_uint_lt(result->failed.where.line, __LINE__);
+ ptu_str_eq(result->failed.variant.str.expr, "snull~==sbar");
+ ptu_str_eq(result->failed.variant.str.cmp, "~==");
+ ptu_str_eq(result->failed.variant.str.expected, "bar");
+ ptu_str_eq(result->failed.variant.str.actual, "(null)");
+
+ return ptu_passed();
+}
+
+static struct ptunit_result param(int arg_i, int *arg_pi)
+{
+ ptu_int_eq(arg_i, i);
+ ptu_ptr_eq(arg_pi, pi);
+
+ return ptu_passed();
+}
+
+struct fixture {
+ struct ptunit_result (*fini)(struct fixture *);
+ uint8_t *pointer;
+ struct ptunit_result (*init)(struct fixture *);
+};
+
+static struct ptunit_result init_fixture(struct fixture *pfix)
+{
+ pfix->pointer = malloc(42);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_fixture(struct fixture *pfix)
+{
+ free(pfix->pointer);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fixture(struct fixture *pfix)
+{
+ ptu_ptr(pfix);
+ ptu_ptr(pfix->pointer);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fixture_param(struct fixture *pfix, uint8_t *rep)
+{
+ ptu_ptr(pfix);
+ ptu_ptr(pfix->pointer);
+
+ free(pfix->pointer);
+ pfix->pointer = rep;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_pass(struct fixture *pfix)
+{
+ (void) pfix;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_skip(struct fixture *pfix)
+{
+ (void) pfix;
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result frun_fail(struct fixture *pfix)
+{
+ ptu_null(pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_die(struct fixture *pfix)
+{
+ (void) pfix;
+
+ *((volatile int *) NULL) = 0;
+
+ return ptu_skipped();
+}
+
+static struct ptunit_result frun_empty_pass(void)
+{
+ struct fixture pfix;
+ struct ptunit_result result;
+
+ pfix.init = NULL;
+ pfix.fini = NULL;
+ ptunit_frun(result, frun_pass, &pfix, &pfix);
+
+ ptu_uint_eq(result.type, ptur_passed);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_init_fail(struct fixture *pfix)
+{
+ struct ptunit_result result;
+
+ pfix->init = frun_fail;
+ pfix->fini = frun_skip;
+ ptunit_frun(result, frun_die, pfix, pfix);
+
+ ptu_uint_eq(result.type, ptur_failed_pointer);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.pointer.expr, "pfix==NULL");
+ ptu_str_eq(result.failed.variant.pointer.cmp, "==");
+ ptu_ptr_eq(result.failed.variant.pointer.expected, NULL);
+ ptu_ptr_eq(result.failed.variant.pointer.actual, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_init_skip(void)
+{
+ struct fixture pfix;
+ struct ptunit_result result;
+
+ pfix.init = frun_skip;
+ pfix.fini = frun_fail;
+ ptunit_frun(result, frun_die, &pfix, &pfix);
+
+ ptu_uint_eq(result.type, ptur_skipped);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_fini_fail(struct fixture *pfix)
+{
+ struct ptunit_result result;
+
+ pfix->init = NULL;
+ pfix->fini = frun_fail;
+ ptunit_frun(result, frun_pass, pfix, pfix);
+
+ ptu_uint_eq(result.type, ptur_failed_pointer);
+ ptu_str_eq(result.failed.where.file, __FILE__);
+ ptu_uint_lt(result.failed.where.line, __LINE__);
+ ptu_str_eq(result.failed.variant.pointer.expr, "pfix==NULL");
+ ptu_str_eq(result.failed.variant.pointer.cmp, "==");
+ ptu_ptr_eq(result.failed.variant.pointer.expected, NULL);
+ ptu_ptr_eq(result.failed.variant.pointer.actual, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_fini_skip(void)
+{
+ struct fixture pfix;
+ struct ptunit_result result;
+
+ pfix.init = NULL;
+ pfix.fini = frun_skip;
+ ptunit_frun(result, frun_pass, &pfix, &pfix);
+
+ ptu_uint_eq(result.type, ptur_skipped);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result frun_fini_preserve(void)
+{
+ struct fixture pfix;
+ struct ptunit_result result;
+
+ pfix.init = NULL;
+ pfix.fini = frun_fail;
+ ptunit_frun(result, frun_skip, &pfix, &pfix);
+
+ ptu_uint_eq(result.type, ptur_skipped);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+ struct test_fixture tfix;
+ struct fixture pfix;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, cmp_pass);
+ ptu_run(suite, int_fail);
+ ptu_run(suite, uint_fail);
+ ptu_run(suite, ptr_fail);
+ ptu_run(suite, null_fail);
+ ptu_run(suite, check_fail);
+
+ tfix.init = init_test_fixture;
+ tfix.fini = fini_test_fixture;
+
+ ptu_run_f(suite, str_fail, tfix);
+ ptu_run_f(suite, str_null, tfix);
+
+ pfix.pointer = NULL;
+ pfix.init = init_fixture;
+ pfix.fini = fini_fixture;
+
+ ptu_run_p(suite, param, i, pi);
+ ptu_run_f(suite, fixture, pfix);
+ ptu_run_fp(suite, fixture_param, pfix, NULL);
+
+ ptu_run(suite, frun_empty_pass);
+ ptu_run(suite, frun_init_skip);
+ ptu_run(suite, frun_fini_skip);
+ ptu_run(suite, frun_fini_preserve);
+
+ ptu_run_p(suite, frun_init_fail, &pfix);
+ ptu_run_p(suite, frun_fini_fail, &pfix);
+
+ return ptunit_report(&suite);
+}
diff --git a/ptxed/CMakeLists.txt b/ptxed/CMakeLists.txt
new file mode 100644
index 000000000000..f05ab0cb3eb8
--- /dev/null
+++ b/ptxed/CMakeLists.txt
@@ -0,0 +1,79 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+set(XED_INCLUDE "" CACHE PATH "")
+set(XED_LIBDIR "" CACHE PATH "")
+
+include_directories(
+ include
+ ../libipt/internal/include
+)
+
+include_directories(SYSTEM
+ ${XED_INCLUDE}
+)
+
+link_directories(
+ ${XED_LIBDIR}
+)
+
+set(PTXED_FILES
+ src/ptxed.c
+ ../libipt/src/pt_cpu.c
+)
+
+if (CMAKE_HOST_UNIX)
+ set(PTXED_FILES ${PTXED_FILES} ../libipt/src/posix/pt_cpuid.c)
+endif (CMAKE_HOST_UNIX)
+
+if (CMAKE_HOST_WIN32)
+ set(PTXED_FILES ${PTXED_FILES} ../libipt/src/windows/pt_cpuid.c)
+endif (CMAKE_HOST_WIN32)
+
+if (FEATURE_ELF)
+ set(PTXED_FILES ${PTXED_FILES} src/load_elf.c)
+endif (FEATURE_ELF)
+
+add_executable(ptxed
+ ${PTXED_FILES}
+)
+target_link_libraries(ptxed libipt)
+target_link_libraries(ptxed xed)
+
+if (SIDEBAND)
+ target_link_libraries(ptxed libipt-sb)
+endif (SIDEBAND)
+
+if (CMAKE_HOST_WIN32)
+ # suppress warnings from XED header files
+ #
+ # w4127: conditional expression is constant
+ # w4244: conversion: possible loss of data
+ #
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4244")
+
+endif (CMAKE_HOST_WIN32)
diff --git a/ptxed/include/load_elf.h b/ptxed/include/load_elf.h
new file mode 100644
index 000000000000..6e956509880c
--- /dev/null
+++ b/ptxed/include/load_elf.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LOAD_ELF_H
+#define LOAD_ELF_H
+
+#include <stdint.h>
+
+struct pt_image_section_cache;
+struct pt_image;
+
+
+/* Load an ELF file.
+ *
+ * Adds sections for all ELF LOAD segments.
+ *
+ * The sections are loaded relative to the virtual addresses specified in
+ * the ELF program headers, with the lowest-addressed section loaded at @base.
+ *
+ * The name of the program in @prog is used for error reporting.
+ * If @verbose is non-zero, prints information about loaded sections.
+ *
+ * Does not load dependent files.
+ * Does not support dynamic relocations.
+ *
+ * Successfully loaded segments are not unloaded in case of errors.
+ *
+ * If @iscache is not NULL, use it to cache image sections.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * Returns -pte_invalid if @image or @file is NULL.
+ * Returns -pte_bad_config if @file can't be processed.
+ * Returns -pte_nomem if not enough memory can be allocated.
+ */
+extern int load_elf(struct pt_image_section_cache *iscache,
+ struct pt_image *image, const char *file,
+ uint64_t base, const char *prog, int verbose);
+
+#endif /* LOAD_ELF_H */
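A sketch of the intended use from a decoder setup path (the file name, program name, and error handling are illustrative; pt_image_free() is assumed to be the matching image destructor from intel-pt.h):

#include "load_elf.h"

#include "intel-pt.h"

#include <stdio.h>

static int load_example(const char *prog)
{
	struct pt_image_section_cache *iscache;
	struct pt_image *image;
	int errcode;

	iscache = pt_iscache_alloc(NULL);
	if (!iscache)
		return -pte_nomem;

	image = pt_image_alloc(NULL);
	if (!image) {
		pt_iscache_free(iscache);
		return -pte_nomem;
	}

	/* Load "a.out" at its default virtual addresses (@base == 0). */
	errcode = load_elf(iscache, image, "a.out", 0ull, prog, 1);
	if (errcode < 0)
		fprintf(stderr, "%s: %s.\n", prog,
			pt_errstr(pt_errcode(errcode)));

	pt_image_free(image);
	pt_iscache_free(iscache);

	return errcode;
}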
diff --git a/ptxed/src/load_elf.c b/ptxed/src/load_elf.c
new file mode 100644
index 000000000000..00efef5b68a4
--- /dev/null
+++ b/ptxed/src/load_elf.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "load_elf.h"
+
+#include "intel-pt.h"
+
+#include <stdio.h>
+#include <elf.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <string.h>
+#include <limits.h>
+
+
+static int load_section(struct pt_image_section_cache *iscache,
+ struct pt_image *image, const char *name,
+ uint64_t offset, uint64_t size, uint64_t vaddr)
+{
+ if (!iscache)
+ return pt_image_add_file(image, name, offset, size, NULL,
+ vaddr);
+ else {
+ int isid;
+
+ isid = pt_iscache_add_file(iscache, name, offset, size, vaddr);
+ if (isid < 0)
+ return isid;
+
+ return pt_image_add_cached(image, iscache, isid, NULL);
+ }
+}
+
+static int load_elf32(struct pt_image_section_cache *iscache,
+ struct pt_image *image, FILE *file, uint64_t base,
+ const char *name, const char *prog, int verbose)
+{
+ Elf32_Ehdr ehdr;
+ Elf32_Half pidx;
+ int64_t offset;
+ size_t count;
+ int errcode, sections;
+
+ errcode = fseek(file, 0, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr,
+ "%s: warning: %s error seeking ELF header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ count = fread(&ehdr, sizeof(ehdr), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s error reading ELF header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ errcode = fseek(file, (long) ehdr.e_phoff, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr,
+ "%s: warning: %s error seeking program header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ /* Determine the load offset. */
+ if (!base)
+ offset = 0;
+ else {
+ uint64_t minaddr;
+
+ minaddr = UINT64_MAX;
+
+ for (pidx = 0; pidx < ehdr.e_phnum; ++pidx) {
+ Elf32_Phdr phdr;
+
+ count = fread(&phdr, sizeof(phdr), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s error reading "
+ "phdr %u: %s.\n",
+ prog, name, pidx, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ if (phdr.p_vaddr < minaddr)
+ minaddr = phdr.p_vaddr;
+ }
+
+ offset = base - minaddr;
+ }
+
+ errcode = fseek(file, (long) ehdr.e_phoff, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr,
+ "%s: warning: %s error seeking program header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ for (sections = 0, pidx = 0; pidx < ehdr.e_phnum; ++pidx) {
+ Elf32_Phdr phdr;
+
+ count = fread(&phdr, sizeof(phdr), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s error reading phdr %u: %s.\n",
+ prog, name, pidx, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ if (!phdr.p_filesz)
+ continue;
+
+ errcode = load_section(iscache, image, name, phdr.p_offset,
+ phdr.p_filesz, phdr.p_vaddr + offset);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: warning: %s: failed to create "
+ "section for phdr %u: %s.\n", prog, name, pidx,
+ pt_errstr(pt_errcode(errcode)));
+ continue;
+ }
+
+ sections += 1;
+
+ if (verbose) {
+ printf("%s:", name);
+ printf(" offset=0x%" PRIx32, phdr.p_offset);
+ printf(" size=0x%" PRIx32, phdr.p_filesz);
+ printf(" vaddr=0x%" PRIx32, phdr.p_vaddr);
+ printf(".\n");
+ }
+ }
+
+ if (!sections)
+ fprintf(stderr,
+ "%s: warning: %s: did not find any load sections.\n",
+ prog, name);
+
+ return 0;
+}
+
+static int load_elf64(struct pt_image_section_cache *iscache,
+ struct pt_image *image, FILE *file, uint64_t base,
+ const char *name, const char *prog, int verbose)
+{
+ Elf64_Ehdr ehdr;
+ Elf64_Half pidx;
+ int64_t offset;
+ size_t count;
+ int errcode, sections;
+
+ errcode = fseek(file, 0, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr,
+ "%s: warning: %s error seeking ELF header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ count = fread(&ehdr, sizeof(ehdr), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s error reading ELF header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ if (LONG_MAX < ehdr.e_phoff) {
+ fprintf(stderr, "%s: warning: %s ELF header too big.\n",
+ prog, name);
+ return -pte_bad_config;
+ }
+
+ errcode = fseek(file, (long) ehdr.e_phoff, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr,
+ "%s: warning: %s error seeking program header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ /* Determine the load offset. */
+ if (!base)
+ offset = 0;
+ else {
+ uint64_t minaddr;
+
+ minaddr = UINT64_MAX;
+
+ for (pidx = 0; pidx < ehdr.e_phnum; ++pidx) {
+ Elf64_Phdr phdr;
+
+ count = fread(&phdr, sizeof(phdr), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s error reading "
+ "phdr %u: %s.\n",
+ prog, name, pidx, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ if (phdr.p_vaddr < minaddr)
+ minaddr = phdr.p_vaddr;
+ }
+
+ offset = base - minaddr;
+ }
+
+ errcode = fseek(file, (long) ehdr.e_phoff, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr,
+ "%s: warning: %s error seeking program header: %s.\n",
+ prog, name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ for (sections = 0, pidx = 0; pidx < ehdr.e_phnum; ++pidx) {
+ Elf64_Phdr phdr;
+
+ count = fread(&phdr, sizeof(phdr), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s error reading phdr %u: %s.\n",
+ prog, name, pidx, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ if (!phdr.p_filesz)
+ continue;
+
+ errcode = load_section(iscache, image, name, phdr.p_offset,
+ phdr.p_filesz, phdr.p_vaddr + offset);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: warning: %s: failed to create "
+ "section for phdr %u: %s.\n", prog, name, pidx,
+ pt_errstr(pt_errcode(errcode)));
+ continue;
+ }
+
+ sections += 1;
+
+ if (verbose) {
+ printf("%s:", name);
+ printf(" offset=0x%" PRIx64, phdr.p_offset);
+ printf(" size=0x%" PRIx64, phdr.p_filesz);
+ printf(" vaddr=0x%" PRIx64, phdr.p_vaddr);
+ printf(".\n");
+ }
+ }
+
+ if (!sections)
+ fprintf(stderr,
+ "%s: warning: %s: did not find any load sections.\n",
+ prog, name);
+
+ return 0;
+}
+
+int load_elf(struct pt_image_section_cache *iscache, struct pt_image *image,
+ const char *name, uint64_t base, const char *prog, int verbose)
+{
+ uint8_t e_ident[EI_NIDENT];
+ FILE *file;
+ size_t count;
+ int errcode, idx;
+
+ if (!image || !name)
+ return -pte_invalid;
+
+ file = fopen(name, "rb");
+ if (!file) {
+ fprintf(stderr, "%s: warning: failed to open %s: %s.\n", prog,
+ name, strerror(errno));
+ return -pte_bad_config;
+ }
+
+ count = fread(e_ident, sizeof(e_ident), 1, file);
+ if (count != 1) {
+ fprintf(stderr,
+ "%s: warning: %s failed to read file header: %s.\n",
+ prog, name, strerror(errno));
+
+ errcode = -pte_bad_config;
+ goto out;
+ }
+
+ for (idx = 0; idx < SELFMAG; ++idx) {
+ if (e_ident[idx] != ELFMAG[idx]) {
+ fprintf(stderr,
+ "%s: warning: ignoring %s: not an ELF file.\n",
+ prog, name);
+
+ errcode = -pte_bad_config;
+ goto out;
+ }
+ }
+
+ switch (e_ident[EI_CLASS]) {
+ default:
+ fprintf(stderr, "%s: unsupported ELF class: %d\n",
+ prog, e_ident[EI_CLASS]);
+ errcode = -pte_bad_config;
+ break;
+
+ case ELFCLASS32:
+ errcode = load_elf32(iscache, image, file, base, name, prog,
+ verbose);
+ break;
+
+ case ELFCLASS64:
+ errcode = load_elf64(iscache, image, file, base, name, prog,
+ verbose);
+ break;
+ }
+
+out:
+ fclose(file);
+ return errcode;
+}
diff --git a/ptxed/src/ptxed.c b/ptxed/src/ptxed.c
new file mode 100644
index 000000000000..a6ad3fc203f5
--- /dev/null
+++ b/ptxed/src/ptxed.c
@@ -0,0 +1,2829 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(FEATURE_ELF)
+# include "load_elf.h"
+#endif /* defined(FEATURE_ELF) */
+
+#include "pt_cpu.h"
+
+#include "intel-pt.h"
+
+#if defined(FEATURE_SIDEBAND)
+# include "libipt-sb.h"
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+
+#include <xed-interface.h>
+
+
+/* The type of decoder to be used. */
+enum ptxed_decoder_type {
+ pdt_insn_decoder,
+ pdt_block_decoder
+};
+
+/* The decoder to use. */
+struct ptxed_decoder {
+ /* The decoder type. */
+ enum ptxed_decoder_type type;
+
+ /* The actual decoder. */
+ union {
+ /* If @type == pdt_insn_decoder */
+ struct pt_insn_decoder *insn;
+
+ /* If @type == pdt_block_decoder */
+ struct pt_block_decoder *block;
+ } variant;
+
+ /* The image section cache. */
+ struct pt_image_section_cache *iscache;
+
+#if defined(FEATURE_SIDEBAND)
+ /* The sideband session. */
+ struct pt_sb_session *session;
+
+#if defined(FEATURE_PEVENT)
+ /* The perf event sideband decoder configuration. */
+ struct pt_sb_pevent_config pevent;
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+};
+
+/* A collection of options. */
+struct ptxed_options {
+#if defined(FEATURE_SIDEBAND)
+ /* Sideband dump flags. */
+ uint32_t sb_dump_flags;
+#endif
+ /* Do not print the instruction. */
+ uint32_t dont_print_insn:1;
+
+ /* Remain as quiet as possible - excluding error messages. */
+ uint32_t quiet:1;
+
+ /* Print statistics (overrides quiet). */
+ uint32_t print_stats:1;
+
+ /* Print information about section loads and unloads. */
+ uint32_t track_image:1;
+
+ /* Track blocks in the output.
+ *
+ * This only applies to the block decoder.
+ */
+ uint32_t track_blocks:1;
+
+ /* Print in AT&T format. */
+ uint32_t att_format:1;
+
+ /* Print the offset into the trace file. */
+ uint32_t print_offset:1;
+
+ /* Print the current timestamp. */
+ uint32_t print_time:1;
+
+ /* Print the raw bytes for an insn. */
+ uint32_t print_raw_insn:1;
+
+ /* Perform checks. */
+ uint32_t check:1;
+
+ /* Print the time stamp of events. */
+ uint32_t print_event_time:1;
+
+ /* Print the ip of events. */
+ uint32_t print_event_ip:1;
+
+ /* Request tick events. */
+ uint32_t enable_tick_events:1;
+
+#if defined(FEATURE_SIDEBAND)
+ /* Print sideband warnings. */
+ uint32_t print_sb_warnings:1;
+#endif
+};
+
+/* A collection of flags selecting which stats to collect/print. */
+enum ptxed_stats_flag {
+ /* Collect number of instructions. */
+ ptxed_stat_insn = (1 << 0),
+
+ /* Collect number of blocks. */
+ ptxed_stat_blocks = (1 << 1)
+};
+
+/* A collection of statistics. */
+struct ptxed_stats {
+ /* The number of instructions. */
+ uint64_t insn;
+
+ /* The number of blocks.
+ *
+ * This only applies to the block decoder.
+ */
+ uint64_t blocks;
+
+ /* A collection of flags saying which statistics to collect/print. */
+ uint32_t flags;
+};
+
+static int ptxed_have_decoder(const struct ptxed_decoder *decoder)
+{
+ /* It suffices to check for one decoder in the variant union. */
+ return decoder && decoder->variant.insn;
+}
+
+static int ptxed_init_decoder(struct ptxed_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ memset(decoder, 0, sizeof(*decoder));
+ decoder->type = pdt_block_decoder;
+
+ decoder->iscache = pt_iscache_alloc(NULL);
+ if (!decoder->iscache)
+ return -pte_nomem;
+
+#if defined(FEATURE_SIDEBAND)
+ decoder->session = pt_sb_alloc(decoder->iscache);
+ if (!decoder->session) {
+ pt_iscache_free(decoder->iscache);
+ return -pte_nomem;
+ }
+
+#if defined(FEATURE_PEVENT)
+ memset(&decoder->pevent, 0, sizeof(decoder->pevent));
+ decoder->pevent.size = sizeof(decoder->pevent);
+ decoder->pevent.kernel_start = UINT64_MAX;
+ decoder->pevent.time_mult = 1;
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+
+ return 0;
+}
+
+static void ptxed_free_decoder(struct ptxed_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ switch (decoder->type) {
+ case pdt_insn_decoder:
+ pt_insn_free_decoder(decoder->variant.insn);
+ break;
+
+ case pdt_block_decoder:
+ pt_blk_free_decoder(decoder->variant.block);
+ break;
+ }
+
+#if defined(FEATURE_SIDEBAND)
+ pt_sb_free(decoder->session);
+#endif
+
+ pt_iscache_free(decoder->iscache);
+}
+
+static void version(const char *name)
+{
+ struct pt_version v = pt_library_version();
+
+ printf("%s-%d.%d.%d%s / libipt-%" PRIu8 ".%" PRIu8 ".%" PRIu32 "%s\n",
+ name, PT_VERSION_MAJOR, PT_VERSION_MINOR, PT_VERSION_BUILD,
+ PT_VERSION_EXT, v.major, v.minor, v.build, v.ext);
+}
+
+static void help(const char *name)
+{
+ printf("usage: %s [<options>]\n\n", name);
+ printf("options:\n");
+ printf(" --help|-h this text.\n");
+ printf(" --version display version information and exit.\n");
+ printf(" --att print instructions in att format.\n");
+ printf(" --no-inst do not print instructions (only addresses).\n");
+ printf(" --quiet|-q do not print anything (except errors).\n");
+ printf(" --offset print the offset into the trace file.\n");
+ printf(" --time print the current timestamp.\n");
+ printf(" --raw-insn print the raw bytes of each instruction.\n");
+ printf(" --check perform checks (expensive).\n");
+ printf(" --iscache-limit <size> set the image section cache limit to <size> bytes.\n");
+ printf(" --event:time print the tsc for events if available.\n");
+ printf(" --event:ip print the ip of events if available.\n");
+ printf(" --event:tick request tick events.\n");
+ printf(" --filter:addr<n>_cfg <cfg> set IA32_RTIT_CTL.ADDRn_CFG to <cfg>.\n");
+ printf(" --filter:addr<n>_a <base> set IA32_RTIT_ADDRn_A to <base>.\n");
+ printf(" --filter:addr<n>_b <limit> set IA32_RTIT_ADDRn_B to <limit>.\n");
+ printf(" --stat print statistics (even when quiet).\n");
+ printf(" collects all statistics unless one or more are selected.\n");
+ printf(" --stat:insn collect number of instructions.\n");
+#if defined(FEATURE_SIDEBAND)
+ printf(" --sb:compact | --sb show sideband records in compact format.\n");
+ printf(" --sb:verbose show sideband records in verbose format.\n");
+ printf(" --sb:filename show the filename on sideband records.\n");
+ printf(" --sb:offset show the offset on sideband records.\n");
+ printf(" --sb:time show the time on sideband records.\n");
+ printf(" --sb:switch print the new image name on context switches.\n");
+ printf(" --sb:warn show sideband warnings.\n");
+#if defined(FEATURE_PEVENT)
+ printf(" --pevent:primary/secondary <file>[:<from>[-<to>]]\n");
+ printf(" load a perf_event sideband stream from <file>.\n");
+ printf(" an optional offset or range can be given.\n");
+ printf(" --pevent:sample-type <val> set perf_event_attr.sample_type to <val> (default: 0).\n");
+ printf(" --pevent:time-zero <val> set perf_event_mmap_page.time_zero to <val> (default: 0).\n");
+ printf(" --pevent:time-shift <val> set perf_event_mmap_page.time_shift to <val> (default: 0).\n");
+ printf(" --pevent:time-mult <val> set perf_event_mmap_page.time_mult to <val> (default: 1).\n");
+ printf(" --pevent:tsc-offset <val> show perf events <val> ticks earlier.\n");
+ printf(" --pevent:kernel-start <val> the start address of the kernel.\n");
+ printf(" --pevent:sysroot <path> prepend <path> to sideband filenames.\n");
+#if defined(FEATURE_ELF)
+ printf(" --pevent:kcore <file> load the kernel from a core dump.\n");
+#endif /* defined(FEATURE_ELF) */
+ printf(" --pevent:vdso-x64 <file> use <file> as 64-bit vdso.\n");
+ printf(" --pevent:vdso-x32 <file> use <file> as x32 vdso.\n");
+ printf(" --pevent:vdso-ia32 <file> use <file> as 32-bit vdso.\n");
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+ printf(" --verbose|-v print various information (even when quiet).\n");
+ printf(" --pt <file>[:<from>[-<to>]] load the processor trace data from <file>.\n");
+ printf(" an optional offset or range can be given.\n");
+#if defined(FEATURE_ELF)
+ printf(" --elf <<file>[:<base>] load an ELF from <file> at address <base>.\n");
+ printf(" use the default load address if <base> is omitted.\n");
+#endif /* defined(FEATURE_ELF) */
+ printf(" --raw <file>[:<from>[-<to>]]:<base> load a raw binary from <file> at address <base>.\n");
+ printf(" an optional offset or range can be given.\n");
+ printf(" --cpu none|auto|f/m[/s] set cpu to the given value and decode according to:\n");
+ printf(" none spec (default)\n");
+ printf(" auto current cpu\n");
+ printf(" f/m[/s] family/model[/stepping]\n");
+ printf(" --mtc-freq <n> set the MTC frequency (IA32_RTIT_CTL[17:14]) to <n>.\n");
+ printf(" --nom-freq <n> set the nominal frequency (MSR_PLATFORM_INFO[15:8]) to <n>.\n");
+ printf(" --cpuid-0x15.eax set the value of cpuid[0x15].eax.\n");
+ printf(" --cpuid-0x15.ebx set the value of cpuid[0x15].ebx.\n");
+ printf(" --insn-decoder use the instruction flow decoder (default).\n");
+ printf(" --block-decoder use the block decoder.\n");
+ printf(" --block:show-blocks show blocks in the output.\n");
+ printf(" --block:end-on-call set the end-on-call block decoder flag.\n");
+ printf(" --block:end-on-jump set the end-on-jump block decoder flag.\n");
+ printf("\n");
+#if defined(FEATURE_ELF)
+ printf("You must specify at least one binary or ELF file (--raw|--elf).\n");
+#else /* defined(FEATURE_ELF) */
+ printf("You must specify at least one binary file (--raw).\n");
+#endif /* defined(FEATURE_ELF) */
+ printf("You must specify exactly one processor trace file (--pt).\n");
+}
+
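+/* Extract a trailing ':<base>' address from @arg.
+ *
+ * On success, store the parsed address in @base, truncate @arg at the ':',
+ * and return 1.
+ *
+ * Returns 0 if @arg does not end in a parsable base address.
+ */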
+static int extract_base(char *arg, uint64_t *base)
+{
+ char *sep, *rest;
+
+ sep = strrchr(arg, ':');
+ if (sep) {
+ uint64_t num;
+
+ if (!sep[1])
+ return 0;
+
+ errno = 0;
+ num = strtoull(sep+1, &rest, 0);
+ if (errno || *rest)
+ return 0;
+
+ *base = num;
+ *sep = 0;
+ return 1;
+ }
+
+ return 0;
+}
+
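+/* Parse a '<begin>[-<end>]' range argument.
+ *
+ * Returns the number of components parsed (1 for an offset, 2 for a range),
+ * zero if @arg is empty, and a negative value if parsing fails.
+ */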
+static int parse_range(const char *arg, uint64_t *begin, uint64_t *end)
+{
+ char *rest;
+
+ if (!arg || !*arg)
+ return 0;
+
+ errno = 0;
+ *begin = strtoull(arg, &rest, 0);
+ if (errno)
+ return -1;
+
+ if (!*rest)
+ return 1;
+
+ if (*rest != '-')
+ return -1;
+
+ *end = strtoull(rest+1, &rest, 0);
+ if (errno || *rest)
+ return -1;
+
+ return 2;
+}
+
+/* Preprocess a filename argument.
+ *
+ * A filename may optionally be followed by a file offset or a file range
+ * argument separated by ':'. Split the original argument into the filename
+ * part and the offset/range part.
+ *
+ * If no end address is specified, set @size to zero.
+ * If no offset is specified, set @offset to zero.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int preprocess_filename(char *filename, uint64_t *offset, uint64_t *size)
+{
+ uint64_t begin, end;
+ char *range;
+ int parts;
+
+ if (!filename || !offset || !size)
+ return -pte_internal;
+
+ /* Search from the end as the filename may also contain ':'. */
+ range = strrchr(filename, ':');
+ if (!range) {
+ *offset = 0ull;
+ *size = 0ull;
+
+ return 0;
+ }
+
+ /* Let's try to parse an optional range suffix.
+ *
+ * If we can, remove it from the filename argument.
+ * If we can not, assume that the ':' is part of the filename, e.g. a
+ * drive letter on Windows.
+ */
+ parts = parse_range(range + 1, &begin, &end);
+ if (parts <= 0) {
+ *offset = 0ull;
+ *size = 0ull;
+
+ return 0;
+ }
+
+ if (parts == 1) {
+ *offset = begin;
+ *size = 0ull;
+
+ *range = 0;
+
+ return 0;
+ }
+
+ if (parts == 2) {
+ if (end <= begin)
+ return -pte_invalid;
+
+ *offset = begin;
+ *size = end - begin;
+
+ *range = 0;
+
+ return 0;
+ }
+
+ return -pte_internal;
+}
+
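+/* Load a portion of @filename into memory.
+ *
+ * Reads @size bytes starting at @offset, or everything up to the end of the
+ * file if @size is zero, into a newly allocated buffer returned via @buffer
+ * and @psize.
+ *
+ * Returns zero on success; prints a diagnostic and returns a negative value
+ * otherwise.
+ */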
+static int load_file(uint8_t **buffer, size_t *psize, const char *filename,
+ uint64_t offset, uint64_t size, const char *prog)
+{
+ uint8_t *content;
+ size_t read;
+ FILE *file;
+ long fsize, begin, end;
+ int errcode;
+
+ if (!buffer || !psize || !filename || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "");
+ return -1;
+ }
+
+ errno = 0;
+ file = fopen(filename, "rb");
+ if (!file) {
+ fprintf(stderr, "%s: failed to open %s: %d.\n",
+ prog, filename, errno);
+ return -1;
+ }
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode) {
+ fprintf(stderr, "%s: failed to determine size of %s: %d.\n",
+ prog, filename, errno);
+ goto err_file;
+ }
+
+ fsize = ftell(file);
+ if (fsize < 0) {
+ fprintf(stderr, "%s: failed to determine size of %s: %d.\n",
+ prog, filename, errno);
+ goto err_file;
+ }
+
+ begin = (long) offset;
+ if (((uint64_t) begin != offset) || (fsize <= begin)) {
+ fprintf(stderr,
+ "%s: bad offset 0x%" PRIx64 " into %s.\n",
+ prog, offset, filename);
+ goto err_file;
+ }
+
+ end = fsize;
+ if (size) {
+ uint64_t range_end;
+
+ range_end = offset + size;
+ if ((uint64_t) end < range_end) {
+ fprintf(stderr,
+ "%s: bad range 0x%" PRIx64 " in %s.\n",
+ prog, range_end, filename);
+ goto err_file;
+ }
+
+ end = (long) range_end;
+ }
+
+ fsize = end - begin;
+
+ content = malloc(fsize);
+ if (!content) {
+ fprintf(stderr, "%s: failed to allocated memory %s.\n",
+ prog, filename);
+ goto err_file;
+ }
+
+ errcode = fseek(file, begin, SEEK_SET);
+ if (errcode) {
+ fprintf(stderr, "%s: failed to load %s: %d.\n",
+ prog, filename, errno);
+ goto err_content;
+ }
+
+ read = fread(content, fsize, 1, file);
+ if (read != 1) {
+ fprintf(stderr, "%s: failed to load %s: %d.\n",
+ prog, filename, errno);
+ goto err_content;
+ }
+
+ fclose(file);
+
+ *buffer = content;
+ *psize = fsize;
+
+ return 0;
+
+err_content:
+ free(content);
+
+err_file:
+ fclose(file);
+ return -1;
+}
+
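+/* Load the processor trace for @config from @arg.
+ *
+ * The argument is a filename with an optional ':<from>[-<to>]' range suffix.
+ * On success, @config->begin and @config->end delimit the loaded trace.
+ *
+ * Returns zero on success, a negative value otherwise.
+ */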
+static int load_pt(struct pt_config *config, char *arg, const char *prog)
+{
+ uint64_t foffset, fsize;
+ uint8_t *buffer;
+ size_t size;
+ int errcode;
+
+ errcode = preprocess_filename(arg, &foffset, &fsize);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: bad file %s: %s.\n", prog, arg,
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ errcode = load_file(&buffer, &size, arg, foffset, fsize, prog);
+ if (errcode < 0)
+ return errcode;
+
+ config->begin = buffer;
+ config->end = buffer + size;
+
+ return 0;
+}
+
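+/* Add a raw binary to @image.
+ *
+ * The argument has the form '<file>[:<from>[-<to>]]:<base>'. The file, or
+ * the selected range of it, is added to @iscache and mapped into @image at
+ * address <base>.
+ *
+ * Returns zero on success, a negative value otherwise.
+ */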
+static int load_raw(struct pt_image_section_cache *iscache,
+ struct pt_image *image, char *arg, const char *prog)
+{
+ uint64_t base, foffset, fsize;
+ int isid, errcode, has_base;
+
+ has_base = extract_base(arg, &base);
+ if (has_base <= 0) {
+ fprintf(stderr, "%s: failed to parse base address"
+ "from '%s'.\n", prog, arg);
+ return -1;
+ }
+
+ errcode = preprocess_filename(arg, &foffset, &fsize);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: bad file %s: %s.\n", prog, arg,
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ if (!fsize)
+ fsize = UINT64_MAX;
+
+ isid = pt_iscache_add_file(iscache, arg, foffset, fsize, base);
+ if (isid < 0) {
+ fprintf(stderr, "%s: failed to add %s at 0x%" PRIx64 ": %s.\n",
+ prog, arg, base, pt_errstr(pt_errcode(isid)));
+ return -1;
+ }
+
+ errcode = pt_image_add_cached(image, iscache, isid, NULL);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: failed to add %s at 0x%" PRIx64 ": %s.\n",
+ prog, arg, base, pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ return 0;
+}
+
+static xed_machine_mode_enum_t translate_mode(enum pt_exec_mode mode)
+{
+ switch (mode) {
+ case ptem_unknown:
+ return XED_MACHINE_MODE_INVALID;
+
+ case ptem_16bit:
+ return XED_MACHINE_MODE_LEGACY_16;
+
+ case ptem_32bit:
+ return XED_MACHINE_MODE_LEGACY_32;
+
+ case ptem_64bit:
+ return XED_MACHINE_MODE_LONG_64;
+ }
+
+ return XED_MACHINE_MODE_INVALID;
+}
+
+static const char *visualize_iclass(enum pt_insn_class iclass)
+{
+ switch (iclass) {
+ case ptic_error:
+ return "unknown/error";
+
+ case ptic_other:
+ return "other";
+
+ case ptic_call:
+ return "near call";
+
+ case ptic_return:
+ return "near return";
+
+ case ptic_jump:
+ return "near jump";
+
+ case ptic_cond_jump:
+ return "cond jump";
+
+ case ptic_far_call:
+ return "far call";
+
+ case ptic_far_return:
+ return "far return";
+
+ case ptic_far_jump:
+ return "far jump";
+
+ case ptic_ptwrite:
+ return "ptwrite";
+ }
+
+ return "undefined";
+}
+
+static void check_insn_iclass(const xed_inst_t *inst,
+ const struct pt_insn *insn, uint64_t offset)
+{
+ xed_category_enum_t category;
+ xed_iclass_enum_t iclass;
+
+ if (!inst || !insn) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ category = xed_inst_category(inst);
+ iclass = xed_inst_iclass(inst);
+
+ switch (insn->iclass) {
+ case ptic_error:
+ break;
+
+ case ptic_ptwrite:
+ case ptic_other:
+ switch (category) {
+ default:
+ return;
+
+ case XED_CATEGORY_CALL:
+ case XED_CATEGORY_RET:
+ case XED_CATEGORY_COND_BR:
+ case XED_CATEGORY_UNCOND_BR:
+ case XED_CATEGORY_INTERRUPT:
+ case XED_CATEGORY_SYSCALL:
+ case XED_CATEGORY_SYSRET:
+ break;
+ }
+ break;
+
+ case ptic_call:
+ if (iclass == XED_ICLASS_CALL_NEAR)
+ return;
+
+ break;
+
+ case ptic_return:
+ if (iclass == XED_ICLASS_RET_NEAR)
+ return;
+
+ break;
+
+ case ptic_jump:
+ if (iclass == XED_ICLASS_JMP)
+ return;
+
+ break;
+
+ case ptic_cond_jump:
+ if (category == XED_CATEGORY_COND_BR)
+ return;
+
+ break;
+
+ case ptic_far_call:
+ switch (iclass) {
+ default:
+ break;
+
+ case XED_ICLASS_CALL_FAR:
+ case XED_ICLASS_INT:
+ case XED_ICLASS_INT1:
+ case XED_ICLASS_INT3:
+ case XED_ICLASS_INTO:
+ case XED_ICLASS_SYSCALL:
+ case XED_ICLASS_SYSCALL_AMD:
+ case XED_ICLASS_SYSENTER:
+ case XED_ICLASS_VMCALL:
+ return;
+ }
+ break;
+
+ case ptic_far_return:
+ switch (iclass) {
+ default:
+ break;
+
+ case XED_ICLASS_RET_FAR:
+ case XED_ICLASS_IRET:
+ case XED_ICLASS_IRETD:
+ case XED_ICLASS_IRETQ:
+ case XED_ICLASS_SYSRET:
+ case XED_ICLASS_SYSRET_AMD:
+ case XED_ICLASS_SYSEXIT:
+ case XED_ICLASS_VMLAUNCH:
+ case XED_ICLASS_VMRESUME:
+ return;
+ }
+ break;
+
+ case ptic_far_jump:
+ if (iclass == XED_ICLASS_JMP_FAR)
+ return;
+
+ break;
+ }
+
+ /* If we get here, @insn->iclass doesn't match XED's classification. */
+ printf("[%" PRIx64 ", %" PRIx64 ": iclass error: iclass: %s, "
+ "xed iclass: %s, category: %s]\n", offset, insn->ip,
+ visualize_iclass(insn->iclass), xed_iclass_enum_t2str(iclass),
+ xed_category_enum_t2str(category));
+
+}
+
+static void check_insn_decode(xed_decoded_inst_t *inst,
+ const struct pt_insn *insn, uint64_t offset)
+{
+ xed_error_enum_t errcode;
+
+ if (!inst || !insn) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ xed_decoded_inst_set_mode(inst, translate_mode(insn->mode),
+ XED_ADDRESS_WIDTH_INVALID);
+
+ /* Decode the instruction (again).
+ *
+ * We may have decoded the instruction already for printing. In this
+ * case, we will decode it twice.
+ *
+ * The more common use-case, however, is to check the instruction class
+ * while not printing instructions since the latter is too expensive for
+ * regular use with long traces.
+ */
+ errcode = xed_decode(inst, insn->raw, insn->size);
+ if (errcode != XED_ERROR_NONE) {
+ printf("[%" PRIx64 ", %" PRIx64 ": xed error: (%u) %s]\n",
+ offset, insn->ip, errcode,
+ xed_error_enum_t2str(errcode));
+ return;
+ }
+
+ if (!xed_decoded_inst_valid(inst)) {
+ printf("[%" PRIx64 ", %" PRIx64 ": xed error: "
+ "invalid instruction]\n", offset, insn->ip);
+ return;
+ }
+}
+
+static void check_insn(const struct pt_insn *insn, uint64_t offset)
+{
+ xed_decoded_inst_t inst;
+
+ if (!insn) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ if (insn->isid <= 0)
+ printf("[%" PRIx64 ", %" PRIx64 ": check error: "
+ "bad isid]\n", offset, insn->ip);
+
+ xed_decoded_inst_zero(&inst);
+ check_insn_decode(&inst, insn, offset);
+
+ /* We need a valid instruction in order to do further checks.
+ *
+ * Invalid instructions have already been diagnosed.
+ */
+ if (!xed_decoded_inst_valid(&inst))
+ return;
+
+ check_insn_iclass(xed_decoded_inst_inst(&inst), insn, offset);
+}
+
+static void print_raw_insn(const struct pt_insn *insn)
+{
+ uint8_t length, idx;
+
+ if (!insn) {
+ printf("[internal error]");
+ return;
+ }
+
+ length = insn->size;
+ if (sizeof(insn->raw) < length)
+ length = sizeof(insn->raw);
+
+ for (idx = 0; idx < length; ++idx)
+ printf(" %02x", insn->raw[idx]);
+
+ for (; idx < pt_max_insn_size; ++idx)
+ printf(" ");
+}
+
+static void xed_print_insn(const xed_decoded_inst_t *inst, uint64_t ip,
+ const struct ptxed_options *options)
+{
+ xed_print_info_t pi;
+ char buffer[256];
+ xed_bool_t ok;
+
+ if (!inst || !options) {
+ printf(" [internal error]");
+ return;
+ }
+
+ if (options->print_raw_insn) {
+ xed_uint_t length, i;
+
+ length = xed_decoded_inst_get_length(inst);
+ for (i = 0; i < length; ++i)
+ printf(" %02x", xed_decoded_inst_get_byte(inst, i));
+
+ for (; i < pt_max_insn_size; ++i)
+ printf(" ");
+ }
+
+ xed_init_print_info(&pi);
+ pi.p = inst;
+ pi.buf = buffer;
+ pi.blen = sizeof(buffer);
+ pi.runtime_address = ip;
+
+ if (options->att_format)
+ pi.syntax = XED_SYNTAX_ATT;
+
+ ok = xed_format_generic(&pi);
+ if (!ok) {
+ printf(" [xed print error]");
+ return;
+ }
+
+ printf(" %s", buffer);
+}
+
+static void print_insn(const struct pt_insn *insn, xed_state_t *xed,
+ const struct ptxed_options *options, uint64_t offset,
+ uint64_t time)
+{
+ if (!insn || !options) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ if (options->print_offset)
+ printf("%016" PRIx64 " ", offset);
+
+ if (options->print_time)
+ printf("%016" PRIx64 " ", time);
+
+ if (insn->speculative)
+ printf("? ");
+
+ printf("%016" PRIx64, insn->ip);
+
+ if (!options->dont_print_insn) {
+ xed_machine_mode_enum_t mode;
+ xed_decoded_inst_t inst;
+ xed_error_enum_t errcode;
+
+ mode = translate_mode(insn->mode);
+
+ xed_state_set_machine_mode(xed, mode);
+ xed_decoded_inst_zero_set_mode(&inst, xed);
+
+ errcode = xed_decode(&inst, insn->raw, insn->size);
+ switch (errcode) {
+ case XED_ERROR_NONE:
+ xed_print_insn(&inst, insn->ip, options);
+ break;
+
+ default:
+ print_raw_insn(insn);
+
+ printf(" [xed decode error: (%u) %s]", errcode,
+ xed_error_enum_t2str(errcode));
+ break;
+ }
+ }
+
+ printf("\n");
+}
+
+static const char *print_exec_mode(enum pt_exec_mode mode)
+{
+ switch (mode) {
+ case ptem_unknown:
+ return "<unknown>";
+
+ case ptem_16bit:
+ return "16-bit";
+
+ case ptem_32bit:
+ return "32-bit";
+
+ case ptem_64bit:
+ return "64-bit";
+ }
+
+ return "<invalid>";
+}
+
+static void print_event(const struct pt_event *event,
+ const struct ptxed_options *options, uint64_t offset)
+{
+ if (!event || !options) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ printf("[");
+
+ if (options->print_offset)
+ printf("%016" PRIx64 " ", offset);
+
+ if (options->print_event_time && event->has_tsc)
+ printf("%016" PRIx64 " ", event->tsc);
+
+ switch (event->type) {
+ case ptev_enabled:
+ printf("%s", event->variant.enabled.resumed ? "resumed" :
+ "enabled");
+
+ if (options->print_event_ip)
+ printf(", ip: %016" PRIx64, event->variant.enabled.ip);
+ break;
+
+ case ptev_disabled:
+ printf("disabled");
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.disabled.ip);
+ break;
+
+ case ptev_async_disabled:
+ printf("disabled");
+
+ if (options->print_event_ip) {
+ printf(", at: %016" PRIx64,
+ event->variant.async_disabled.at);
+
+ if (!event->ip_suppressed)
+ printf(", ip: %016" PRIx64,
+ event->variant.async_disabled.ip);
+ }
+ break;
+
+ case ptev_async_branch:
+ printf("interrupt");
+
+ if (options->print_event_ip) {
+ printf(", from: %016" PRIx64,
+ event->variant.async_branch.from);
+
+ if (!event->ip_suppressed)
+ printf(", to: %016" PRIx64,
+ event->variant.async_branch.to);
+ }
+ break;
+
+ case ptev_paging:
+ printf("paging, cr3: %016" PRIx64 "%s",
+ event->variant.paging.cr3,
+ event->variant.paging.non_root ? ", nr" : "");
+ break;
+
+ case ptev_async_paging:
+ printf("paging, cr3: %016" PRIx64 "%s",
+ event->variant.async_paging.cr3,
+ event->variant.async_paging.non_root ? ", nr" : "");
+
+ if (options->print_event_ip)
+ printf(", ip: %016" PRIx64,
+ event->variant.async_paging.ip);
+ break;
+
+ case ptev_overflow:
+ printf("overflow");
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.overflow.ip);
+ break;
+
+ case ptev_exec_mode:
+ printf("exec mode: %s",
+ print_exec_mode(event->variant.exec_mode.mode));
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64,
+ event->variant.exec_mode.ip);
+ break;
+
+ case ptev_tsx:
+ if (event->variant.tsx.aborted)
+ printf("aborted");
+ else if (event->variant.tsx.speculative)
+ printf("begin transaction");
+ else
+ printf("committed");
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.tsx.ip);
+ break;
+
+ case ptev_stop:
+ printf("stopped");
+ break;
+
+ case ptev_vmcs:
+ printf("vmcs, base: %016" PRIx64, event->variant.vmcs.base);
+ break;
+
+ case ptev_async_vmcs:
+ printf("vmcs, base: %016" PRIx64,
+ event->variant.async_vmcs.base);
+
+ if (options->print_event_ip)
+ printf(", ip: %016" PRIx64,
+ event->variant.async_vmcs.ip);
+ break;
+
+ case ptev_exstop:
+ printf("exstop");
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.exstop.ip);
+ break;
+
+ case ptev_mwait:
+ printf("mwait %" PRIx32 " %" PRIx32,
+ event->variant.mwait.hints, event->variant.mwait.ext);
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.mwait.ip);
+ break;
+
+ case ptev_pwre:
+ printf("pwre c%u.%u", (event->variant.pwre.state + 1) & 0xf,
+ (event->variant.pwre.sub_state + 1) & 0xf);
+
+ if (event->variant.pwre.hw)
+ printf(" hw");
+ break;
+
+ case ptev_pwrx:
+ printf("pwrx ");
+
+ if (event->variant.pwrx.interrupt)
+ printf("int: ");
+
+ if (event->variant.pwrx.store)
+ printf("st: ");
+
+ if (event->variant.pwrx.autonomous)
+ printf("hw: ");
+
+ printf("c%u (c%u)", (event->variant.pwrx.last + 1) & 0xf,
+ (event->variant.pwrx.deepest + 1) & 0xf);
+ break;
+
+ case ptev_ptwrite:
+ printf("ptwrite: %" PRIx64, event->variant.ptwrite.payload);
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.ptwrite.ip);
+ break;
+
+ case ptev_tick:
+ printf("tick");
+
+ if (options->print_event_ip && !event->ip_suppressed)
+ printf(", ip: %016" PRIx64, event->variant.tick.ip);
+ break;
+
+ case ptev_cbr:
+ printf("cbr: %x", event->variant.cbr.ratio);
+ break;
+
+ case ptev_mnt:
+ printf("mnt: %" PRIx64, event->variant.mnt.payload);
+ break;
+ }
+
+ printf("]\n");
+}
+
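+/* Print a decode error diagnostic.
+ *
+ * Reports @errtype and @errcode together with the current decoder offset and
+ * the IP at which the error occurred.
+ */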
+static void diagnose(struct ptxed_decoder *decoder, uint64_t ip,
+ const char *errtype, int errcode)
+{
+ int err;
+ uint64_t pos;
+
+ err = -pte_internal;
+ pos = 0ull;
+
+ switch (decoder->type) {
+ case pdt_insn_decoder:
+ err = pt_insn_get_offset(decoder->variant.insn, &pos);
+ break;
+
+ case pdt_block_decoder:
+ err = pt_blk_get_offset(decoder->variant.block, &pos);
+ break;
+ }
+
+ if (err < 0) {
+ printf("could not determine offset: %s\n",
+ pt_errstr(pt_errcode(err)));
+ printf("[?, %" PRIx64 ": %s: %s]\n", ip, errtype,
+ pt_errstr(pt_errcode(errcode)));
+ } else
+ printf("[%" PRIx64 ", %" PRIx64 ": %s: %s]\n", pos,
+ ip, errtype, pt_errstr(pt_errcode(errcode)));
+}
+
+#if defined(FEATURE_SIDEBAND)
+
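+/* Forward @event to the sideband decoders.
+ *
+ * If sideband processing switches to a different process image, install the
+ * new image in the instruction flow or block decoder.
+ */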
+static int ptxed_sb_event(struct ptxed_decoder *decoder,
+ const struct pt_event *event,
+ const struct ptxed_options *options)
+{
+ struct pt_image *image;
+ int errcode;
+
+ if (!decoder || !event || !options)
+ return -pte_internal;
+
+ image = NULL;
+ errcode = pt_sb_event(decoder->session, &image, event, sizeof(*event),
+ stdout, options->sb_dump_flags);
+ if (errcode < 0)
+ return errcode;
+
+ if (!image)
+ return 0;
+
+ switch (decoder->type) {
+ case pdt_insn_decoder:
+ return pt_insn_set_image(decoder->variant.insn, image);
+
+ case pdt_block_decoder:
+ return pt_blk_set_image(decoder->variant.block, image);
+ }
+
+ return -pte_internal;
+}
+
+#endif /* defined(FEATURE_SIDEBAND) */
+
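+/* Process pending events while decoding with the instruction flow decoder.
+ *
+ * Fetches and prints events as long as @status indicates that events are
+ * pending and forwards them to the sideband decoders, if configured.
+ *
+ * Returns the last decoder status or a negative error code.
+ */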
+static int drain_events_insn(struct ptxed_decoder *decoder, uint64_t *time,
+ int status, const struct ptxed_options *options)
+{
+ struct pt_insn_decoder *ptdec;
+ int errcode;
+
+ if (!decoder || !time || !options)
+ return -pte_internal;
+
+ ptdec = decoder->variant.insn;
+
+ while (status & pts_event_pending) {
+ struct pt_event event;
+ uint64_t offset;
+
+ offset = 0ull;
+ if (options->print_offset) {
+ errcode = pt_insn_get_offset(ptdec, &offset);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ status = pt_insn_event(ptdec, &event, sizeof(event));
+ if (status < 0)
+ return status;
+
+ *time = event.tsc;
+
+ if (!options->quiet && !event.status_update)
+ print_event(&event, options, offset);
+
+#if defined(FEATURE_SIDEBAND)
+ errcode = ptxed_sb_event(decoder, &event, options);
+ if (errcode < 0)
+ return errcode;
+#endif /* defined(FEATURE_SIDEBAND) */
+ }
+
+ return status;
+}
+
+static void decode_insn(struct ptxed_decoder *decoder,
+ const struct ptxed_options *options,
+ struct ptxed_stats *stats)
+{
+ struct pt_insn_decoder *ptdec;
+ xed_state_t xed;
+ uint64_t offset, sync, time;
+
+ if (!decoder || !options) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ xed_state_zero(&xed);
+
+ ptdec = decoder->variant.insn;
+ offset = 0ull;
+ sync = 0ull;
+ time = 0ull;
+ for (;;) {
+ struct pt_insn insn;
+ int status;
+
+ /* Initialize the IP - we use it for error reporting. */
+ insn.ip = 0ull;
+
+ status = pt_insn_sync_forward(ptdec);
+ if (status < 0) {
+ uint64_t new_sync;
+ int errcode;
+
+ if (status == -pte_eos)
+ break;
+
+ diagnose(decoder, insn.ip, "sync error", status);
+
+ /* Let's see if we made any progress. If we haven't,
+ * we likely never will. Bail out.
+ *
+ * We intentionally report the error twice to indicate
+ * that we tried to re-sync. Maybe it even changed.
+ */
+ errcode = pt_insn_get_offset(ptdec, &new_sync);
+ if (errcode < 0 || (new_sync <= sync))
+ break;
+
+ sync = new_sync;
+ continue;
+ }
+
+ for (;;) {
+ status = drain_events_insn(decoder, &time, status,
+ options);
+ if (status < 0)
+ break;
+
+ if (status & pts_eos) {
+ if (!(status & pts_ip_suppressed) &&
+ !options->quiet)
+ printf("[end of trace]\n");
+
+ status = -pte_eos;
+ break;
+ }
+
+ if (options->print_offset || options->check) {
+ int errcode;
+
+ errcode = pt_insn_get_offset(ptdec, &offset);
+ if (errcode < 0)
+ break;
+ }
+
+ status = pt_insn_next(ptdec, &insn, sizeof(insn));
+ if (status < 0) {
+ /* Even in case of errors, we may have succeeded
+ * in decoding the current instruction.
+ */
+ if (insn.iclass != ptic_error) {
+ if (!options->quiet)
+ print_insn(&insn, &xed, options,
+ offset, time);
+ if (stats)
+ stats->insn += 1;
+
+ if (options->check)
+ check_insn(&insn, offset);
+ }
+ break;
+ }
+
+ if (!options->quiet)
+ print_insn(&insn, &xed, options, offset, time);
+
+ if (stats)
+ stats->insn += 1;
+
+ if (options->check)
+ check_insn(&insn, offset);
+ }
+
+ /* We shouldn't break out of the loop without an error. */
+ if (!status)
+ status = -pte_internal;
+
+ /* We're done when we reach the end of the trace stream. */
+ if (status == -pte_eos)
+ break;
+
+ diagnose(decoder, insn.ip, "error", status);
+ }
+}
+
+static int xed_next_ip(uint64_t *pip, const xed_decoded_inst_t *inst,
+ uint64_t ip)
+{
+ xed_uint_t length, disp_width;
+
+ if (!pip || !inst)
+ return -pte_internal;
+
+ length = xed_decoded_inst_get_length(inst);
+ if (!length) {
+ printf("[xed error: failed to determine instruction length]\n");
+ return -pte_bad_insn;
+ }
+
+ ip += length;
+
+ /* If it got a branch displacement it must be a branch.
+ *
+ * This includes conditional branches for which we don't know whether
+ * they were taken. The next IP won't be used in this case as a
+ * conditional branch ends a block. The next block will start with the
+ * correct IP.
+ */
+ disp_width = xed_decoded_inst_get_branch_displacement_width(inst);
+ if (disp_width)
+ ip += xed_decoded_inst_get_branch_displacement(inst);
+
+ *pip = ip;
+ return 0;
+}
+
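+/* Fill in @insn for the instruction at @ip in @block.
+ *
+ * The raw bytes are read from @iscache, except for a truncated last
+ * instruction, whose bytes are taken directly from @block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */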
+static int block_fetch_insn(struct pt_insn *insn, const struct pt_block *block,
+ uint64_t ip, struct pt_image_section_cache *iscache)
+{
+ if (!insn || !block)
+ return -pte_internal;
+
+ /* We can't read from an empty block. */
+ if (!block->ninsn)
+ return -pte_invalid;
+
+ memset(insn, 0, sizeof(*insn));
+ insn->mode = block->mode;
+ insn->isid = block->isid;
+ insn->ip = ip;
+
+ /* The last instruction in a block may be truncated. */
+ if ((ip == block->end_ip) && block->truncated) {
+ if (!block->size || (sizeof(insn->raw) < (size_t) block->size))
+ return -pte_bad_insn;
+
+ insn->size = block->size;
+ memcpy(insn->raw, block->raw, insn->size);
+ } else {
+ int size;
+
+ size = pt_iscache_read(iscache, insn->raw, sizeof(insn->raw),
+ insn->isid, ip);
+ if (size < 0)
+ return size;
+
+ insn->size = (uint8_t) size;
+ }
+
+ return 0;
+}
+
+static void diagnose_block(struct ptxed_decoder *decoder,
+ const char *errtype, int errcode,
+ const struct pt_block *block)
+{
+ uint64_t ip;
+ int err;
+
+ if (!decoder || !block) {
+ printf("ptxed: internal error");
+ return;
+ }
+
+ /* Determine the IP at which to report the error.
+ *
+ * Depending on the type of error, the IP varies between that of the
+ * last instruction in @block or the next instruction outside of @block.
+ *
+ * When the block is empty, we use the IP of the block itself,
+ * i.e. where the first instruction should have been.
+ */
+ if (!block->ninsn)
+ ip = block->ip;
+ else {
+ ip = block->end_ip;
+
+ switch (errcode) {
+ case -pte_nomap:
+ case -pte_bad_insn: {
+ struct pt_insn insn;
+ xed_decoded_inst_t inst;
+ xed_error_enum_t xederr;
+
+ /* Decode failed when trying to fetch or decode the next
+ * instruction. Since indirect or conditional branches
+ * end a block and don't cause an additional fetch, we
+ * should be able to reach that IP from the last
+ * instruction in @block.
+ *
+ * We ignore errors and fall back to the IP of the last
+ * instruction.
+ */
+ err = block_fetch_insn(&insn, block, ip,
+ decoder->iscache);
+ if (err < 0)
+ break;
+
+ xed_decoded_inst_zero(&inst);
+ xed_decoded_inst_set_mode(&inst,
+ translate_mode(insn.mode),
+ XED_ADDRESS_WIDTH_INVALID);
+
+ xederr = xed_decode(&inst, insn.raw, insn.size);
+ if (xederr != XED_ERROR_NONE)
+ break;
+
+ (void) xed_next_ip(&ip, &inst, insn.ip);
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ diagnose(decoder, ip, errtype, errcode);
+}
+
+static void print_block(struct ptxed_decoder *decoder,
+ const struct pt_block *block,
+ const struct ptxed_options *options,
+ const struct ptxed_stats *stats,
+ uint64_t offset, uint64_t time)
+{
+ xed_machine_mode_enum_t mode;
+ xed_state_t xed;
+ uint64_t ip;
+ uint16_t ninsn;
+
+ if (!block || !options) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ if (options->track_blocks) {
+ printf("[block");
+ if (stats)
+ printf(" %" PRIx64, stats->blocks);
+ printf("]\n");
+ }
+
+ mode = translate_mode(block->mode);
+ xed_state_init2(&xed, mode, XED_ADDRESS_WIDTH_INVALID);
+
+ /* There's nothing to do for empty blocks. */
+ ninsn = block->ninsn;
+ if (!ninsn)
+ return;
+
+ ip = block->ip;
+ for (;;) {
+ struct pt_insn insn;
+ xed_decoded_inst_t inst;
+ xed_error_enum_t xederrcode;
+ int errcode;
+
+ if (options->print_offset)
+ printf("%016" PRIx64 " ", offset);
+
+ if (options->print_time)
+ printf("%016" PRIx64 " ", time);
+
+ if (block->speculative)
+ printf("? ");
+
+ printf("%016" PRIx64, ip);
+
+ errcode = block_fetch_insn(&insn, block, ip, decoder->iscache);
+ if (errcode < 0) {
+ printf(" [fetch error: %s]\n",
+ pt_errstr(pt_errcode(errcode)));
+ break;
+ }
+
+ xed_decoded_inst_zero_set_mode(&inst, &xed);
+
+ xederrcode = xed_decode(&inst, insn.raw, insn.size);
+ if (xederrcode != XED_ERROR_NONE) {
+ print_raw_insn(&insn);
+
+ printf(" [xed decode error: (%u) %s]\n", xederrcode,
+ xed_error_enum_t2str(xederrcode));
+ break;
+ }
+
+ if (!options->dont_print_insn)
+ xed_print_insn(&inst, insn.ip, options);
+
+ printf("\n");
+
+ ninsn -= 1;
+ if (!ninsn)
+ break;
+
+ errcode = xed_next_ip(&ip, &inst, ip);
+ if (errcode < 0) {
+ diagnose(decoder, ip, "reconstruct error", errcode);
+ break;
+ }
+ }
+
+ /* Decode should have brought us to @block->end_ip. */
+ if (ip != block->end_ip)
+ diagnose(decoder, ip, "reconstruct error", -pte_nosync);
+}
+
+static void check_block(const struct pt_block *block,
+ struct pt_image_section_cache *iscache,
+ uint64_t offset)
+{
+ struct pt_insn insn;
+ xed_decoded_inst_t inst;
+ uint64_t ip;
+ uint16_t ninsn;
+ int errcode;
+
+ if (!block) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ /* There's nothing to check for an empty block. */
+ ninsn = block->ninsn;
+ if (!ninsn)
+ return;
+
+ if (block->isid <= 0)
+ printf("[%" PRIx64 ", %" PRIx64 ": check error: "
+ "bad isid]\n", offset, block->ip);
+
+ ip = block->ip;
+ do {
+ errcode = block_fetch_insn(&insn, block, ip, iscache);
+ if (errcode < 0) {
+ printf("[%" PRIx64 ", %" PRIx64 ": fetch error: %s]\n",
+ offset, ip, pt_errstr(pt_errcode(errcode)));
+ return;
+ }
+
+ xed_decoded_inst_zero(&inst);
+ check_insn_decode(&inst, &insn, offset);
+
+ /* We need a valid instruction in order to do further checks.
+ *
+ * Invalid instructions have already been diagnosed.
+ */
+ if (!xed_decoded_inst_valid(&inst))
+ return;
+
+ errcode = xed_next_ip(&ip, &inst, ip);
+ if (errcode < 0) {
+ printf("[%" PRIx64 ", %" PRIx64 ": error: %s]\n",
+ offset, ip, pt_errstr(pt_errcode(errcode)));
+ return;
+ }
+ } while (--ninsn);
+
+ /* We reached the end of the block. Both @insn and @inst refer to the
+ * last instruction in @block.
+ *
+ * Check that we reached the end IP of the block.
+ */
+ if (insn.ip != block->end_ip) {
+ printf("[%" PRIx64 ", %" PRIx64 ": error: did not reach end: %"
+ PRIx64 "]\n", offset, insn.ip, block->end_ip);
+ }
+
+ /* Check the last instruction's classification, if available. */
+ insn.iclass = block->iclass;
+ if (insn.iclass)
+ check_insn_iclass(xed_decoded_inst_inst(&inst), &insn, offset);
+}
+
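+/* Process pending events while decoding with the block decoder.
+ *
+ * Same as drain_events_insn(), except that events are read from the block
+ * decoder.
+ */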
+static int drain_events_block(struct ptxed_decoder *decoder, uint64_t *time,
+ int status, const struct ptxed_options *options)
+{
+ struct pt_block_decoder *ptdec;
+ int errcode;
+
+ if (!decoder || !time || !options)
+ return -pte_internal;
+
+ ptdec = decoder->variant.block;
+
+ while (status & pts_event_pending) {
+ struct pt_event event;
+ uint64_t offset;
+
+ offset = 0ull;
+ if (options->print_offset) {
+ errcode = pt_blk_get_offset(ptdec, &offset);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ status = pt_blk_event(ptdec, &event, sizeof(event));
+ if (status < 0)
+ return status;
+
+ *time = event.tsc;
+
+ if (!options->quiet && !event.status_update)
+ print_event(&event, options, offset);
+
+#if defined(FEATURE_SIDEBAND)
+ errcode = ptxed_sb_event(decoder, &event, options);
+ if (errcode < 0)
+ return errcode;
+#endif /* defined(FEATURE_SIDEBAND) */
+ }
+
+ return status;
+}
+
+static void decode_block(struct ptxed_decoder *decoder,
+ const struct ptxed_options *options,
+ struct ptxed_stats *stats)
+{
+ struct pt_image_section_cache *iscache;
+ struct pt_block_decoder *ptdec;
+ uint64_t offset, sync, time;
+
+ if (!decoder || !options) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ iscache = decoder->iscache;
+ ptdec = decoder->variant.block;
+ offset = 0ull;
+ sync = 0ull;
+ time = 0ull;
+ for (;;) {
+ struct pt_block block;
+ int status;
+
+ /* Initialize IP and ninsn - we use it for error reporting. */
+ block.ip = 0ull;
+ block.ninsn = 0u;
+
+ status = pt_blk_sync_forward(ptdec);
+ if (status < 0) {
+ uint64_t new_sync;
+ int errcode;
+
+ if (status == -pte_eos)
+ break;
+
+ diagnose_block(decoder, "sync error", status, &block);
+
+ /* Let's see if we made any progress. If we haven't,
+ * we likely never will. Bail out.
+ *
+ * We intentionally report the error twice to indicate
+ * that we tried to re-sync. Maybe it even changed.
+ */
+ errcode = pt_blk_get_offset(ptdec, &new_sync);
+ if (errcode < 0 || (new_sync <= sync))
+ break;
+
+ sync = new_sync;
+ continue;
+ }
+
+ for (;;) {
+ status = drain_events_block(decoder, &time, status,
+ options);
+ if (status < 0)
+ break;
+
+ if (status & pts_eos) {
+ if (!(status & pts_ip_suppressed) &&
+ !options->quiet)
+ printf("[end of trace]\n");
+
+ status = -pte_eos;
+ break;
+ }
+
+ if (options->print_offset || options->check) {
+ int errcode;
+
+ errcode = pt_blk_get_offset(ptdec, &offset);
+ if (errcode < 0)
+ break;
+ }
+
+ status = pt_blk_next(ptdec, &block, sizeof(block));
+ if (status < 0) {
+ /* Even in case of errors, we may have succeeded
+ * in decoding some instructions.
+ */
+ if (block.ninsn) {
+ if (stats) {
+ stats->insn += block.ninsn;
+ stats->blocks += 1;
+ }
+
+ if (!options->quiet)
+ print_block(decoder, &block,
+ options, stats,
+ offset, time);
+
+ if (options->check)
+ check_block(&block, iscache,
+ offset);
+ }
+ break;
+ }
+
+ if (stats) {
+ stats->insn += block.ninsn;
+ stats->blocks += 1;
+ }
+
+ if (!options->quiet)
+ print_block(decoder, &block, options, stats,
+ offset, time);
+
+ if (options->check)
+ check_block(&block, iscache, offset);
+ }
+
+ /* We shouldn't break out of the loop without an error. */
+ if (!status)
+ status = -pte_internal;
+
+ /* We're done when we reach the end of the trace stream. */
+ if (status == -pte_eos)
+ break;
+
+ diagnose_block(decoder, "error", status, &block);
+ }
+}
+
+static void decode(struct ptxed_decoder *decoder,
+ const struct ptxed_options *options,
+ struct ptxed_stats *stats)
+{
+ if (!decoder) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ switch (decoder->type) {
+ case pdt_insn_decoder:
+ decode_insn(decoder, options, stats);
+ break;
+
+ case pdt_block_decoder:
+ decode_block(decoder, options, stats);
+ break;
+ }
+}
+
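+/* Allocate the decoder selected in @decoder->type.
+ *
+ * Creates an instruction flow or block decoder from @conf and assigns
+ * @image to it. Tick events are enabled if requested in @options.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */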
+static int alloc_decoder(struct ptxed_decoder *decoder,
+ const struct pt_config *conf, struct pt_image *image,
+ const struct ptxed_options *options, const char *prog)
+{
+ struct pt_config config;
+ int errcode;
+
+ if (!decoder || !conf || !options || !prog)
+ return -pte_internal;
+
+ config = *conf;
+
+ switch (decoder->type) {
+ case pdt_insn_decoder:
+ if (options->enable_tick_events)
+ config.flags.variant.insn.enable_tick_events = 1;
+
+ decoder->variant.insn = pt_insn_alloc_decoder(&config);
+ if (!decoder->variant.insn) {
+ fprintf(stderr,
+ "%s: failed to create decoder.\n", prog);
+ return -pte_nomem;
+ }
+
+ errcode = pt_insn_set_image(decoder->variant.insn, image);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: failed to set image.\n", prog);
+ return errcode;
+ }
+
+ break;
+
+ case pdt_block_decoder:
+ if (options->enable_tick_events)
+ config.flags.variant.block.enable_tick_events = 1;
+
+ decoder->variant.block = pt_blk_alloc_decoder(&config);
+ if (!decoder->variant.block) {
+ fprintf(stderr,
+ "%s: failed to create decoder.\n", prog);
+ return -pte_nomem;
+ }
+
+ errcode = pt_blk_set_image(decoder->variant.block, image);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: failed to set image.\n", prog);
+ return errcode;
+ }
+
+ break;
+ }
+
+ return 0;
+}
+
+static void print_stats(struct ptxed_stats *stats)
+{
+ if (!stats) {
+ printf("[internal error]\n");
+ return;
+ }
+
+ if (stats->flags & ptxed_stat_insn)
+ printf("insn: %" PRIu64 ".\n", stats->insn);
+
+ if (stats->flags & ptxed_stat_blocks)
+ printf("blocks:\t%" PRIu64 ".\n", stats->blocks);
+}
+
+#if defined(FEATURE_SIDEBAND)
+
+static int ptxed_print_error(int errcode, const char *filename,
+ uint64_t offset, void *priv)
+{
+ const struct ptxed_options *options;
+ const char *errstr, *severity;
+
+ options = (struct ptxed_options *) priv;
+ if (!options)
+ return -pte_internal;
+
+ if (errcode >= 0 && !options->print_sb_warnings)
+ return 0;
+
+ if (!filename)
+ filename = "<unknown>";
+
+ severity = errcode < 0 ? "error" : "warning";
+
+ errstr = errcode < 0
+ ? pt_errstr(pt_errcode(errcode))
+ : pt_sb_errstr((enum pt_sb_error_code) errcode);
+
+ if (!errstr)
+ errstr = "<unknown error>";
+
+ printf("[%s:%016" PRIx64 " sideband %s: %s]\n", filename, offset,
+ severity, errstr);
+
+ return 0;
+}
+
+static int ptxed_print_switch(const struct pt_sb_context *context, void *priv)
+{
+ struct pt_image *image;
+ const char *name;
+
+ if (!priv)
+ return -pte_internal;
+
+ image = pt_sb_ctx_image(context);
+ if (!image)
+ return -pte_internal;
+
+ name = pt_image_name(image);
+ if (!name)
+ name = "<unknown>";
+
+ printf("[context: %s]\n", name);
+
+ return 0;
+}
+
+#if defined(FEATURE_PEVENT)
+
+static int ptxed_sb_pevent(struct ptxed_decoder *decoder, char *filename,
+ const char *prog)
+{
+ struct pt_sb_pevent_config config;
+ uint64_t foffset, fsize, fend;
+ int errcode;
+
+ if (!decoder || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "?");
+ return -1;
+ }
+
+ errcode = preprocess_filename(filename, &foffset, &fsize);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: bad file %s: %s.\n", prog, filename,
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ if (SIZE_MAX < foffset) {
+ fprintf(stderr,
+ "%s: bad offset: 0x%" PRIx64 ".\n", prog, foffset);
+ return -1;
+ }
+
+ config = decoder->pevent;
+ config.filename = filename;
+ config.begin = (size_t) foffset;
+ config.end = 0;
+
+ if (fsize) {
+ fend = foffset + fsize;
+ if ((fend <= foffset) || (SIZE_MAX < fend)) {
+ fprintf(stderr,
+ "%s: bad range: 0x%" PRIx64 "-0x%" PRIx64 ".\n",
+ prog, foffset, fend);
+ return -1;
+ }
+
+ config.end = (size_t) fend;
+ }
+
+ errcode = pt_sb_alloc_pevent_decoder(decoder->session, &config);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: error loading %s: %s.\n", prog, filename,
+ pt_errstr(pt_errcode(errcode)));
+ return -1;
+ }
+
+ return 0;
+}
+
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+
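+/* Parse an unsigned 64-bit integer argument for @option.
+ *
+ * Returns 1 and stores the parsed value in @value on success; prints a
+ * diagnostic and returns 0 otherwise.
+ */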
+static int get_arg_uint64(uint64_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ char *rest;
+
+ if (!value || !option || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "?");
+ return 0;
+ }
+
+ if (!arg || arg[0] == 0 || (arg[0] == '-' && arg[1] == '-')) {
+ fprintf(stderr, "%s: %s: missing argument.\n", prog, option);
+ return 0;
+ }
+
+ errno = 0;
+ *value = strtoull(arg, &rest, 0);
+ if (errno || *rest) {
+ fprintf(stderr, "%s: %s: bad argument: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ return 1;
+}
+
+static int get_arg_uint32(uint32_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ uint64_t val;
+
+ if (!get_arg_uint64(&val, option, arg, prog))
+ return 0;
+
+ if (val > UINT32_MAX) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ *value = (uint32_t) val;
+
+ return 1;
+}
+
+#if defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT)
+
+static int get_arg_uint16(uint16_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ uint64_t val;
+
+ if (!get_arg_uint64(&val, option, arg, prog))
+ return 0;
+
+ if (val > UINT16_MAX) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ *value = (uint16_t) val;
+
+ return 1;
+}
+
+#endif /* defined(FEATURE_SIDEBAND) && defined(FEATURE_PEVENT) */
+
+static int get_arg_uint8(uint8_t *value, const char *option, const char *arg,
+ const char *prog)
+{
+ uint64_t val;
+
+ if (!get_arg_uint64(&val, option, arg, prog))
+ return 0;
+
+ if (val > UINT8_MAX) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ *value = (uint8_t) val;
+
+ return 1;
+}
+
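+/* Set the configuration for address filter @filter in @config.
+ *
+ * The value given by @arg is shifted into the ADDRn_CFG field for @filter in
+ * @config->addr_filter.config.addr_cfg.
+ *
+ * Returns 1 on success, 0 otherwise.
+ */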
+static int ptxed_addr_cfg(struct pt_config *config, uint8_t filter,
+ const char *option, const char *arg, const char *prog)
+{
+ uint64_t addr_cfg;
+
+ if (!config || !option || !arg || !prog) {
+ fprintf(stderr, "%s: internal error.\n", prog ? prog : "?");
+ return 0;
+ }
+
+ if (!get_arg_uint64(&addr_cfg, option, arg, prog))
+ return 0;
+
+ if (15 < addr_cfg) {
+ fprintf(stderr, "%s: %s: value too big: %s.\n", prog, option,
+ arg);
+ return 0;
+ }
+
+ /* Make sure the shift doesn't overflow. */
+ if (15 < filter) {
+ fprintf(stderr, "%s: internal error.\n", prog);
+ return 0;
+ }
+
+ addr_cfg <<= (filter * 4);
+
+ config->addr_filter.config.addr_cfg |= addr_cfg;
+
+ return 1;
+}
+
+extern int main(int argc, char *argv[])
+{
+ struct ptxed_decoder decoder;
+ struct ptxed_options options;
+ struct ptxed_stats stats;
+ struct pt_config config;
+ struct pt_image *image;
+ const char *prog;
+ int errcode, i;
+
+ if (!argc) {
+ help("");
+ return 1;
+ }
+
+ prog = argv[0];
+ image = NULL;
+
+ memset(&options, 0, sizeof(options));
+ memset(&stats, 0, sizeof(stats));
+
+ pt_config_init(&config);
+
+ errcode = ptxed_init_decoder(&decoder);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: error initializing decoder: %s.\n", prog,
+ pt_errstr(pt_errcode(errcode)));
+ goto err;
+ }
+
+#if defined(FEATURE_SIDEBAND)
+ pt_sb_notify_error(decoder.session, ptxed_print_error, &options);
+#endif
+
+ image = pt_image_alloc(NULL);
+ if (!image) {
+ fprintf(stderr, "%s: failed to allocate image.\n", prog);
+ goto err;
+ }
+
+ for (i = 1; i < argc;) {
+ char *arg;
+
+ arg = argv[i++];
+
+ if (strcmp(arg, "--help") == 0 || strcmp(arg, "-h") == 0) {
+ help(prog);
+ goto out;
+ }
+ if (strcmp(arg, "--version") == 0) {
+ version(prog);
+ goto out;
+ }
+ if (strcmp(arg, "--pt") == 0) {
+ if (argc <= i) {
+ fprintf(stderr,
+ "%s: --pt: missing argument.\n", prog);
+ goto out;
+ }
+ arg = argv[i++];
+
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: duplicate pt sources: %s.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (config.cpu.vendor) {
+ errcode = pt_cpu_errata(&config.errata,
+ &config.cpu);
+ if (errcode < 0)
+ printf("[0, 0: config error: %s]\n",
+ pt_errstr(pt_errcode(errcode)));
+ }
+
+ errcode = load_pt(&config, arg, prog);
+ if (errcode < 0)
+ goto err;
+
+ errcode = alloc_decoder(&decoder, &config, image,
+ &options, prog);
+ if (errcode < 0)
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--raw") == 0) {
+ if (argc <= i) {
+ fprintf(stderr,
+ "%s: --raw: missing argument.\n", prog);
+ goto out;
+ }
+ arg = argv[i++];
+
+ errcode = load_raw(decoder.iscache, image, arg, prog);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: --raw: failed to load "
+ "'%s'.\n", prog, arg);
+ goto err;
+ }
+
+ continue;
+ }
+#if defined(FEATURE_ELF)
+ if (strcmp(arg, "--elf") == 0) {
+ uint64_t base;
+
+ if (argc <= i) {
+ fprintf(stderr,
+ "%s: --elf: missing argument.\n", prog);
+ goto out;
+ }
+ arg = argv[i++];
+ base = 0ull;
+ errcode = extract_base(arg, &base);
+ if (errcode < 0)
+ goto err;
+
+ errcode = load_elf(decoder.iscache, image, arg, base,
+ prog, options.track_image);
+ if (errcode < 0)
+ goto err;
+
+ continue;
+ }
+#endif /* defined(FEATURE_ELF) */
+ if (strcmp(arg, "--att") == 0) {
+ options.att_format = 1;
+ continue;
+ }
+ if (strcmp(arg, "--no-inst") == 0) {
+ options.dont_print_insn = 1;
+ continue;
+ }
+ if (strcmp(arg, "--quiet") == 0 || strcmp(arg, "-q") == 0) {
+ options.quiet = 1;
+ continue;
+ }
+ if (strcmp(arg, "--offset") == 0) {
+ options.print_offset = 1;
+ continue;
+ }
+ if (strcmp(arg, "--time") == 0) {
+ options.print_time = 1;
+ continue;
+ }
+ if (strcmp(arg, "--raw-insn") == 0) {
+ options.print_raw_insn = 1;
+
+ continue;
+ }
+ if (strcmp(arg, "--event:time") == 0) {
+ options.print_event_time = 1;
+
+ continue;
+ }
+ if (strcmp(arg, "--event:ip") == 0) {
+ options.print_event_ip = 1;
+
+ continue;
+ }
+ if (strcmp(arg, "--event:tick") == 0) {
+ options.enable_tick_events = 1;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr0_cfg") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!ptxed_addr_cfg(&config, 0, arg, argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr0_a") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr0_a, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr0_b") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr0_b, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr1_cfg") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!ptxed_addr_cfg(&config, 1, arg, argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr1_a") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr1_a, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr1_b") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr1_b, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr2_cfg") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!ptxed_addr_cfg(&config, 2, arg, argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr2_a") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr2_a, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr2_b") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr2_b, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr3_cfg") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!ptxed_addr_cfg(&config, 3, arg, argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr3_a") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr3_a, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--filter:addr3_b") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before --pt.\n",
+ prog, arg);
+ goto err;
+ }
+
+ if (!get_arg_uint64(&config.addr_filter.addr3_b, arg,
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--check") == 0) {
+ options.check = 1;
+ continue;
+ }
+ if (strcmp(arg, "--iscache-limit") == 0) {
+ uint64_t limit;
+
+ if (!get_arg_uint64(&limit, arg, argv[i++], prog))
+ goto err;
+
+ errcode = pt_iscache_set_limit(decoder.iscache, limit);
+ if (errcode < 0) {
+ fprintf(stderr, "%s: error setting iscache "
+ "limit: %s.\n", prog,
+ pt_errstr(pt_errcode(errcode)));
+ goto err;
+ }
+
+ continue;
+ }
+ if (strcmp(arg, "--stat") == 0) {
+ options.print_stats = 1;
+ continue;
+ }
+ if (strcmp(arg, "--stat:insn") == 0) {
+ stats.flags |= ptxed_stat_insn;
+ continue;
+ }
+ if (strcmp(arg, "--stat:blocks") == 0) {
+ stats.flags |= ptxed_stat_blocks;
+ continue;
+ }
+#if defined(FEATURE_SIDEBAND)
+ if ((strcmp(arg, "--sb:compact") == 0) ||
+ (strcmp(arg, "--sb") == 0)) {
+ options.sb_dump_flags &= ~ptsbp_verbose;
+ options.sb_dump_flags |= ptsbp_compact;
+ continue;
+ }
+ if (strcmp(arg, "--sb:verbose") == 0) {
+ options.sb_dump_flags &= ~ptsbp_compact;
+ options.sb_dump_flags |= ptsbp_verbose;
+ continue;
+ }
+ if (strcmp(arg, "--sb:filename") == 0) {
+ options.sb_dump_flags |= ptsbp_filename;
+ continue;
+ }
+ if (strcmp(arg, "--sb:offset") == 0) {
+ options.sb_dump_flags |= ptsbp_file_offset;
+ continue;
+ }
+ if (strcmp(arg, "--sb:time") == 0) {
+ options.sb_dump_flags |= ptsbp_tsc;
+ continue;
+ }
+ if (strcmp(arg, "--sb:switch") == 0) {
+ pt_sb_notify_switch(decoder.session, ptxed_print_switch,
+ &options);
+ continue;
+ }
+ if (strcmp(arg, "--sb:warn") == 0) {
+ options.print_sb_warnings = 1;
+ continue;
+ }
+#if defined(FEATURE_PEVENT)
+ if (strcmp(arg, "--pevent:primary") == 0) {
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:primary: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ decoder.pevent.primary = 1;
+ errcode = ptxed_sb_pevent(&decoder, arg, prog);
+ if (errcode < 0)
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:secondary") == 0) {
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:secondary: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ decoder.pevent.primary = 0;
+ errcode = ptxed_sb_pevent(&decoder, arg, prog);
+ if (errcode < 0)
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:sample-type") == 0) {
+ if (!get_arg_uint64(&decoder.pevent.sample_type,
+ "--pevent:sample-type",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:time-zero") == 0) {
+ if (!get_arg_uint64(&decoder.pevent.time_zero,
+ "--pevent:time-zero",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:time-shift") == 0) {
+ if (!get_arg_uint16(&decoder.pevent.time_shift,
+ "--pevent:time-shift",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:time-mult") == 0) {
+ if (!get_arg_uint32(&decoder.pevent.time_mult,
+ "--pevent:time-mult",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:tsc-offset") == 0) {
+ if (!get_arg_uint64(&decoder.pevent.tsc_offset,
+ "--pevent:tsc-offset",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:kernel-start") == 0) {
+ if (!get_arg_uint64(&decoder.pevent.kernel_start,
+ "--pevent:kernel-start",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--pevent:sysroot") == 0) {
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:sysroot: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ decoder.pevent.sysroot = arg;
+ continue;
+ }
+#if defined(FEATURE_ELF)
+ if (strcmp(arg, "--pevent:kcore") == 0) {
+ struct pt_image *kernel;
+ uint64_t base;
+
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:kcore: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ base = 0ull;
+ errcode = extract_base(arg, &base);
+ if (errcode < 0)
+ goto err;
+
+ kernel = pt_sb_kernel_image(decoder.session);
+
+ errcode = load_elf(decoder.iscache, kernel, arg, base,
+ prog, options.track_image);
+ if (errcode < 0)
+ goto err;
+
+ continue;
+ }
+#endif /* defined(FEATURE_ELF) */
+ if (strcmp(arg, "--pevent:vdso-x64") == 0) {
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:vdso-x64: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ decoder.pevent.vdso_x64 = arg;
+ continue;
+ }
+ if (strcmp(arg, "--pevent:vdso-x32") == 0) {
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:vdso-x32: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ decoder.pevent.vdso_x32 = arg;
+ continue;
+ }
+ if (strcmp(arg, "--pevent:vdso-ia32") == 0) {
+ arg = argv[i++];
+ if (!arg) {
+ fprintf(stderr, "%s: --pevent:vdso-ia32: "
+ "missing argument.\n", prog);
+ goto err;
+ }
+
+ decoder.pevent.vdso_ia32 = arg;
+ continue;
+ }
+#endif /* defined(FEATURE_PEVENT) */
+#endif /* defined(FEATURE_SIDEBAND) */
+ if (strcmp(arg, "--cpu") == 0) {
+ /* override cpu information before the decoder
+ * is initialized.
+ */
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify cpu before the pt source file.\n",
+ prog);
+ goto err;
+ }
+ if (argc <= i) {
+ fprintf(stderr,
+ "%s: --cpu: missing argument.\n", prog);
+ goto out;
+ }
+ arg = argv[i++];
+
+ if (strcmp(arg, "auto") == 0) {
+ errcode = pt_cpu_read(&config.cpu);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: error reading cpu: %s.\n",
+ prog,
+ pt_errstr(pt_errcode(errcode)));
+ return 1;
+ }
+ continue;
+ }
+
+ if (strcmp(arg, "none") == 0) {
+ memset(&config.cpu, 0, sizeof(config.cpu));
+ continue;
+ }
+
+ errcode = pt_cpu_parse(&config.cpu, arg);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: cpu must be specified as f/m[/s]\n",
+ prog);
+ goto err;
+ }
+ continue;
+ }
+ if (strcmp(arg, "--mtc-freq") == 0) {
+ if (!get_arg_uint8(&config.mtc_freq, "--mtc-freq",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--nom-freq") == 0) {
+ if (!get_arg_uint8(&config.nom_freq, "--nom-freq",
+ argv[i++], prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--cpuid-0x15.eax") == 0) {
+ if (!get_arg_uint32(&config.cpuid_0x15_eax,
+ "--cpuid-0x15.eax", argv[i++],
+ prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--cpuid-0x15.ebx") == 0) {
+ if (!get_arg_uint32(&config.cpuid_0x15_ebx,
+ "--cpuid-0x15.ebx", argv[i++],
+ prog))
+ goto err;
+
+ continue;
+ }
+ if (strcmp(arg, "--verbose") == 0 || strcmp(arg, "-v") == 0) {
+ options.track_image = 1;
+ continue;
+ }
+
+ if (strcmp(arg, "--insn-decoder") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before the pt "
+ "source file.\n", arg, prog);
+ goto err;
+ }
+
+ decoder.type = pdt_insn_decoder;
+ continue;
+ }
+
+ if (strcmp(arg, "--block-decoder") == 0) {
+ if (ptxed_have_decoder(&decoder)) {
+ fprintf(stderr,
+ "%s: please specify %s before the pt "
+ "source file.\n", arg, prog);
+ goto err;
+ }
+
+ decoder.type = pdt_block_decoder;
+ continue;
+ }
+
+ if (strcmp(arg, "--block:show-blocks") == 0) {
+ options.track_blocks = 1;
+ continue;
+ }
+
+ if (strcmp(arg, "--block:end-on-call") == 0) {
+ config.flags.variant.block.end_on_call = 1;
+ continue;
+ }
+
+ if (strcmp(arg, "--block:end-on-jump") == 0) {
+ config.flags.variant.block.end_on_jump = 1;
+ continue;
+ }
+
+ fprintf(stderr, "%s: unknown option: %s.\n", prog, arg);
+ goto err;
+ }
+
+ if (!ptxed_have_decoder(&decoder)) {
+ fprintf(stderr, "%s: no pt file.\n", prog);
+ goto err;
+ }
+
+ xed_tables_init();
+
+ /* If we didn't select any statistics, select them all depending on the
+ * decoder type.
+ */
+ if (options.print_stats && !stats.flags) {
+ stats.flags |= ptxed_stat_insn;
+
+ if (decoder.type == pdt_block_decoder)
+ stats.flags |= ptxed_stat_blocks;
+ }
+
+#if defined(FEATURE_SIDEBAND)
+ errcode = pt_sb_init_decoders(decoder.session);
+ if (errcode < 0) {
+ fprintf(stderr,
+ "%s: error initializing sideband decoders: %s.\n",
+ prog, pt_errstr(pt_errcode(errcode)));
+ goto err;
+ }
+#endif /* defined(FEATURE_SIDEBAND) */
+
+ decode(&decoder, &options, options.print_stats ? &stats : NULL);
+
+ if (options.print_stats)
+ print_stats(&stats);
+
+out:
+ ptxed_free_decoder(&decoder);
+ pt_image_free(image);
+ free(config.begin);
+ return 0;
+
+err:
+ ptxed_free_decoder(&decoder);
+ pt_image_free(image);
+ free(config.begin);
+ return 1;
+}
diff --git a/script/perf-copy-mapped-files.bash b/script/perf-copy-mapped-files.bash
new file mode 100755
index 000000000000..3027b4315f6a
--- /dev/null
+++ b/script/perf-copy-mapped-files.bash
@@ -0,0 +1,275 @@
+#! /bin/bash
+#
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+set -e
+
+prog=`basename $0`
+
+outdir="."
+dryrun=0
+
+buildid_cache=$(perf config buildid.dir || true)
+if [[ -z $buildid_cache ]]; then
+ buildid_cache="$HOME/.debug"
+fi
+
+usage() {
+ cat <<EOF
+usage: $prog [<options>] <perf.data-file>
+
+Scan the perf data file for MMAP records and copy the referenced files to the
+output directory set via the -o option.
+
+options:
+
+ -h this text
+ -o <dir> set the output directory to <dir> (current: $outdir)
+ -b <dir> set the buildid cache directory to <dir> (current: $buildid_cache)
+ -n print commands instead of executing them
+
+<perf.data-file> defaults to perf.data.
+EOF
+}
+
+while getopts "ho:b:n" opt; do
+ case $opt in
+ h)
+ usage
+ exit 0
+ ;;
+ o)
+ outdir=$OPTARG
+ ;;
+ b)
+ buildid_cache=$OPTARG
+ ;;
+ n)
+ dryrun=1
+ ;;
+ esac
+done
+
+shift $(($OPTIND-1))
+
+
+if [[ $# == 0 ]]; then
+ file="perf.data"
+elif [[ $# == 1 ]]; then
+ file="$1"
+ shift
+fi
+
+if [[ $# != 0 ]]; then
+ echo "$prog: unknown argument: $1. use -h for help."
+ exit 1
+fi
+
+
+# Read the vdsos first.
+#
+# This creates the output directory for vdsos.
+#
+have_vdso32=$(which perf-read-vdso32 2>/dev/null || true)
+have_vdsox32=$(which perf-read-vdsox32 2>/dev/null || true)
+
+if [[ $dryrun == 0 ]]; then
+ mkdir -p "$outdir/vdso"
+else
+ echo "mkdir -p $outdir/vdso"
+fi
+
+if [[ -n $have_vdso32 && ! -e "$outdir/vdso/vdso-ia32.so" ]]; then
+ if [[ $dryrun == 0 ]]; then
+ perf-read-vdso32 > "$outdir/vdso/vdso-ia32.so"
+ else
+ echo "perf-read-vdso32 > $outdir/vdso/vdso-ia32.so"
+ fi
+fi
+
+if [[ -n $have_vdsox32 && ! -e "$outdir/vdso/vdso-x32.so" ]]; then
+ if [[ $dryrun == 0 ]]; then
+ perf-read-vdsox32 > "$outdir/vdso/vdso-x32.so"
+ else
+ echo "perf-read-vdsox32 > $outdir/vdso/vdso-x32.so"
+ fi
+fi
+
+# If we have a buildid cache, use it.
+#
+if [[ -d $buildid_cache ]]; then
+ perf buildid-list -i "$file" | gawk -F' ' -- '
+ function run(cmd) {
+ if (dryrun != 0) {
+ printf("%s\n", cmd)
+ } else {
+ system(cmd)
+ }
+ }
+
+ function dirname(file) {
+ items = split(file, parts, "/", seps)
+
+ delete parts[items]
+
+ dname = ""
+ for (part in parts) {
+ dname = dname seps[part-1] parts[part]
+ }
+
+ return dname
+ }
+
+ function copy(src, dst) {
+ # do not overwrite existing files
+ #
+ status = system("ls " dst " > /dev/null 2>&1")
+ if (status == 0) {
+ return
+ }
+
+ dir = dirname(dst)
+
+ run("mkdir -p " dir)
+ run("cp " src " " dst)
+ }
+
+ /^[0-9a-z]+ *\[vdso/ {
+ src = cache "/[vdso]/" $1 "/vdso"
+
+ status = system("file " src " | grep -e \"ELF 64-bit.*x86-64\" >/dev/null 2>&1")
+ if (status == 0) {
+ copy(src, outdir "/vdso/vdso-x64.so")
+ next
+ }
+
+ status = system("file " src " | grep -e \"ELF 32-bit.*x86-64\" >/dev/null 2>&1")
+ if (status == 0) {
+ copy(src, outdir "/vdso/vdso-x32.so")
+ next
+ }
+
+ status = system("file " src " | grep -e \"ELF 32-bit.*Intel 80386\" >/dev/null 2>&1")
+ if (status == 0) {
+			copy(src, outdir "/vdso/vdso-ia32.so")
+ next
+ }
+
+ printf("%s: failed to determine flavor of %s.\n", prog, src)
+ next
+ }
+
+ /^[0-9a-z]+ *\[/ {
+ next
+ }
+
+	/^[0-9a-z]+ *.*\.ko$/ {
+ next
+ }
+
+	/^[0-9a-z]+ *.*\.ko\.xz$/ {
+ next
+ }
+
+ /^[0-9a-z]+ *\// {
+ copy(cache $2 "/" $1 "/elf", outdir $2)
+ next
+ }
+
+ ' dryrun="$dryrun" outdir="$outdir" cache="$buildid_cache"
+fi
+
+# Copy files that were referenced but not in the buildid cache.
+#
+# We will skip files we already have so we don't overwrite anything we found in
+# the buildid cache.
+#
+perf script --no-itrace -i "$file" -D | gawk -F' ' -- '
+ function run(cmd) {
+ if (dryrun != 0) {
+ printf("%s\n", cmd)
+ } else {
+ system(cmd)
+ }
+ }
+
+ function dirname(file) {
+ items = split(file, parts, "/", seps)
+
+ delete parts[items]
+
+ dname = ""
+ for (part in parts) {
+ dname = dname seps[part-1] parts[part]
+ }
+
+ return dname
+ }
+
+ function handle_mmap(file) {
+ # ignore any non-absolute filename
+ #
+ # this covers pseudo-files like [kallsyms] or [vdso]
+ #
+		if (substr(file, 1, 1) != "/") {
+ return
+ }
+
+ # ignore kernel modules
+ #
+ # we rely on kcore
+ #
+ if (match(file, /\.ko$/) != 0) {
+ return
+ }
+		if (match(file, /\.ko\.xz$/) != 0) {
+ return
+ }
+
+ # ignore //anon
+ #
+ if (file == "//anon") {
+ return
+ }
+
+ dst = outdir file
+
+ # do not overwrite existing files
+ #
+ status = system("ls " dst " > /dev/null 2>&1")
+ if (status == 0) {
+ return
+ }
+
+ dir = dirname(dst)
+
+ run("mkdir -p " dir)
+ run("cp " file " " dst)
+ }
+
+ /PERF_RECORD_MMAP/ { handle_mmap($NF) }
+' dryrun="$dryrun" outdir="$outdir"
diff --git a/script/perf-get-opts.bash b/script/perf-get-opts.bash
new file mode 100755
index 000000000000..cb5e648e5fad
--- /dev/null
+++ b/script/perf-get-opts.bash
@@ -0,0 +1,215 @@
+#! /bin/bash
+#
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+set -e
+
+prog=`basename $0`
+master=""
+kcore=""
+sysroot=""
+
+usage() {
+ cat <<EOF
+usage: $prog [<options>] <perf.data-file>
+
+Create --pevent options for ptdump and ptxed based on <perf.data-file>
+and previously generated <perf.data-file>-sideband*.pevent files.
+
+When tracing ring-0, use perf-with-kcore and supply the path to kcore_dir
+using the -k option.
+
+options:
+ -h this text
+ -m <file> set <file> as the master sideband file (current: $master)
+ -k <dir> set the kcore directory to <dir> (current: $kcore)
+ -s <dir> set the sysroot directory to <dir> (current: $sysroot)
+
+<perf.data-file> defaults to perf.data.
+EOF
+}
+
+while getopts "hm:k:s:" opt; do
+ case $opt in
+ h)
+ usage
+ exit 0
+ ;;
+ m)
+ master="$OPTARG"
+ ;;
+ k)
+ kcore="$OPTARG"
+ ;;
+ s)
+ sysroot="$OPTARG"
+ ;;
+ esac
+done
+
+shift $(($OPTIND-1))
+
+
+if [[ $# == 0 ]]; then
+ file="perf.data"
+elif [[ $# == 1 ]]; then
+ file="$1"
+ shift
+else
+ usage
+ exit 1
+fi
+
+
+perf script --header-only -i $file | \
+ gawk -F'[ ,]' -- '
+ /^# cpuid : / {
+ vendor = $4
+ family = strtonum($5)
+ model = strtonum($6)
+ stepping = strtonum($7)
+
+ if (vendor == "GenuineIntel") {
+ printf(" --cpu %d/%d/%d", family, model, stepping)
+ }
+ }
+'
+
+perf script --no-itrace -i $file -D | \
+ grep -A18 -e PERF_RECORD_AUXTRACE_INFO | \
+ gawk -F' ' -- '
+ /^ *Time Shift/ { printf(" --pevent:time-shift %s", $NF) }
+	# perf spells "Multiplier" without the first "t" in some versions;
+	# match both spellings.
+	/^ *Time Muliplier/	{ printf(" --pevent:time-mult %s", $NF) }
+	/^ *Time Multiplier/	{ printf(" --pevent:time-mult %s", $NF) }
+ /^ *Time Zero/ { printf(" --pevent:time-zero %s", $NF) }
+ /^ *TSC:CTC numerator/ { printf(" --cpuid-0x15.ebx %s", $NF) }
+ /^ *TSC:CTC denominator/ { printf(" --cpuid-0x15.eax %s", $NF) }
+ /^ *Max non-turbo ratio/ { printf(" --nom-freq %s", $NF) }
+'
+
+gawk_sample_type() {
+ echo $1 | gawk -- '
+ BEGIN { RS = "[|\n]" }
+ /^TID$/ { config += 0x00002 }
+ /^TIME$/ { config += 0x00004 }
+ /^ID$/ { config += 0x00040 }
+ /^CPU$/ { config += 0x00080 }
+ /^STREAM$/ { config += 0x00200 }
+ /^IDENTIFIER$/ { config += 0x10000 }
+ END {
+ if (config != 0) {
+ printf(" --pevent:sample-type 0x%x", config)
+ }
+ }
+'
+}
+
+attr_sample_types=$(perf evlist -v -i $file | gawk -F' ' -- '
+ BEGIN { RS = "," }
+ /sample_type/ { print $2 }
+' | sort | uniq)
+
+for attr in $attr_sample_types; do
+ # We assume at most one attr with and at most one attr without CPU
+ #
+ if [[ $(echo $attr | grep -e CPU) ]]; then
+ gawk_sample_type $attr
+ else
+ gawk_sample_type $attr
+ fi
+done
+
+perf evlist -v -i $file | grep intel_pt | gawk -F' ' -- '
+ BEGIN { RS = "," }
+ /config/ {
+ config = strtonum($2)
+ mtc_freq = and(rshift(config, 14), 0xf)
+
+ printf(" --mtc-freq 0x%x", mtc_freq)
+ }
+'
+
+if [[ -n "$sysroot" ]]; then
+ echo -n " --pevent:sysroot $sysroot"
+
+ if [[ -r "$sysroot/vdso/vdso-x64.so" ]]; then
+ echo -n " --pevent:vdso-x64 $sysroot/vdso/vdso-x64.so"
+ fi
+
+ if [[ -r "$sysroot/vdso/vdso-x32.so" ]]; then
+ echo -n " --pevent:vdso-x32 $sysroot/vdso/vdso-x32.so"
+ fi
+
+ if [[ -r "$sysroot/vdso/vdso-ia32.so" ]]; then
+ echo -n " --pevent:vdso-ia32 $sysroot/vdso/vdso-ia32.so"
+ fi
+fi
+
+if [[ -n "$kcore" ]]; then
+ if [[ ! -d "$kcore" ]]; then
+ echo "$prog: kcore_dir '$kcore' is not a directory."
+ exit 1
+ fi
+
+ if [[ ! -r "$kcore/kcore" ]]; then
+ echo "$prog: 'kcore' not found in '$kcore' or not readable."
+ exit 1
+ fi
+
+ echo -n " --pevent:kcore $kcore/kcore"
+
+ if [[ ! -r "$kcore/kallsyms" ]]; then
+ echo "$prog: 'kallsyms' not found in '$kcore' or not readable."
+ exit 1
+ fi
+
+ cat "$kcore/kallsyms" | \
+ gawk -M -- '
+ function update_kernel_start(vaddr) {
+ if (vaddr < kernel_start) {
+ kernel_start = vaddr
+ }
+ }
+
+ BEGIN { kernel_start = 0xffffffffffffffff }
+ /^[0-9a-f]+ T _text$/ { update_kernel_start(strtonum("0x" $1)) }
+ /^[0-9a-f]+ T _stext$/ { update_kernel_start(strtonum("0x" $1)) }
+ END {
+ if (kernel_start < 0xffffffffffffffff) {
+ printf(" --pevent:kernel-start 0x%x", kernel_start)
+ }
+ }
+ '
+fi
+
+for sbfile in $(ls -1 "$(basename $file)"-sideband*.pevent 2>/dev/null); do
+ if [[ -z "$master" || "$sbfile" == "$master" ]]; then
+ echo -n " --pevent:primary $sbfile"
+ else
+ echo -n " --pevent:secondary $sbfile"
+ fi
+done
diff --git a/script/perf-read-aux.bash b/script/perf-read-aux.bash
new file mode 100755
index 000000000000..f86ac07397df
--- /dev/null
+++ b/script/perf-read-aux.bash
@@ -0,0 +1,124 @@
+#! /bin/bash
+#
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+set -e
+
+prog=`basename $0`
+
+usage() {
+ cat <<EOF
+usage: $prog [<options>] <perf.data-file>
+
+Extract the raw AUX area from a perf data file.
+
+options:
+ -h this text
+ -d print commands, don't execute them
+ -S generate one file per AUXTRACE record
+
+<perf.data-file> defaults to perf.data.
+EOF
+}
+
+dry_run=0
+snapshot=0
+while getopts "hdS" opt; do
+ case $opt in
+ h)
+ usage
+ exit 0
+ ;;
+ d)
+ dry_run=1
+ ;;
+ S)
+ snapshot=1
+ ;;
+ esac
+done
+
+shift $(($OPTIND-1))
+
+
+if [[ $# == 0 ]]; then
+ file="perf.data"
+elif [[ $# == 1 ]]; then
+ file="$1"
+ shift
+else
+ usage
+ exit 1
+fi
+
+base="$(basename $file)"
+
+if [[ "$dry_run" == 0 ]]; then
+ nofiles=0
+
+ for ofile in $base-aux-idx*.bin; do
+ if [[ -w $ofile ]]; then
+ echo "$prog: $ofile is in the way."
+			nofiles=$((nofiles + 1))
+ fi
+ done
+
+ if [[ "$nofiles" > 0 ]]; then
+ exit 1
+ fi
+fi
+
+
+perf script --no-itrace -i "$file" -D | gawk -F' ' -- '
+ /PERF_RECORD_AUXTRACE / {
+ offset = strtonum($1)
+ hsize = strtonum(substr($2, 2))
+ size = strtonum($5)
+ idx = strtonum($11)
+ ext = ""
+
+ if (snapshot != 0) {
+ piece = pieces[idx]
+ pieces[idx] = piece + 1
+
+ ext = sprintf(".%u", piece);
+ }
+
+ ofile = sprintf("%s-aux-idx%d%s.bin", base, idx, ext)
+ begin = offset + hsize
+
+ cmd = sprintf("dd if=%s of=%s conv=notrunc oflag=append ibs=1 skip=%d " \
+ "count=%d status=none", file, ofile, begin, size)
+
+ if (dry_run != 0) {
+ print cmd
+ }
+ else {
+ system(cmd)
+ }
+ }
+' file="$file" base="$base" dry_run="$dry_run" snapshot="$snapshot"
diff --git a/script/perf-read-sideband.bash b/script/perf-read-sideband.bash
new file mode 100755
index 000000000000..4daa805f404a
--- /dev/null
+++ b/script/perf-read-sideband.bash
@@ -0,0 +1,150 @@
+#! /bin/bash
+#
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+set -e
+
+prog=`basename $0`
+
+usage() {
+ cat <<EOF
+usage: $prog [<options>] <perf.data-file>
+
+Extract the sideband records from a perf data file.
+
+options:
+ -h this text
+ -d print commands, don't execute them
+
+<perf.data-file> defaults to perf.data.
+EOF
+}
+
+dry_run=0
+while getopts "hd" opt; do
+ case $opt in
+ h)
+ usage
+ exit 0
+ ;;
+ d)
+ dry_run=1
+ ;;
+ esac
+done
+
+shift $(($OPTIND-1))
+
+
+if [[ $# == 0 ]]; then
+ file="perf.data"
+elif [[ $# == 1 ]]; then
+ file="$1"
+ shift
+else
+ usage
+ exit 1
+fi
+
+base="$(basename $file)"
+
+if [[ "$dry_run" == 0 ]]; then
+ nofiles=0
+
+ for ofile in $base-sideband-cpu*.pevent $base-sideband.pevent; do
+ if [[ -w $ofile ]]; then
+ echo "$prog: $ofile is in the way."
+			nofiles=$((nofiles + 1))
+ fi
+ done
+
+ if [[ "$nofiles" > 0 ]]; then
+ exit 1
+ fi
+fi
+
+
+perf script --no-itrace -i "$file" -D | gawk -F' ' -- '
+ function handle_record(ofile, offset, size) {
+ cmd = sprintf("dd if=%s of=%s conv=notrunc oflag=append ibs=1 skip=%d " \
+ "count=%d status=none", file, ofile, offset, size)
+
+ if (dry_run != 0) {
+ print cmd
+ }
+ else {
+ system(cmd)
+ }
+
+ next
+ }
+
+ function handle_global_record(offset, size) {
+ ofile = sprintf("%s-sideband.pevent", base)
+
+ handle_record(ofile, offset, size)
+ }
+
+ function handle_cpu_record(cpu, offset, size) {
+ # (uint32_t) -1 = 4294967295
+ #
+ if (cpu == -1 || cpu == 4294967295) {
+ handle_global_record(offset, size);
+ }
+ else {
+ ofile = sprintf("%s-sideband-cpu%d.pevent", base, cpu)
+
+ handle_record(ofile, offset, size)
+ }
+ }
+
+ /PERF_RECORD_AUXTRACE_INFO/ { next }
+ /PERF_RECORD_AUXTRACE/ { next }
+ /PERF_RECORD_FINISHED_ROUND/ { next }
+
+ /^[0-9]+ [0-9]+ 0x[0-9a-f]+ \[0x[0-9a-f]+\]: PERF_RECORD_/ {
+ cpu = strtonum($1)
+ begin = strtonum($3)
+ size = strtonum(substr($4, 2))
+
+ handle_cpu_record(cpu, begin, size)
+ }
+
+ /^[0-9]+ 0x[0-9a-f]+ \[0x[0-9a-f]+\]: PERF_RECORD_/ {
+ begin = strtonum($2)
+ size = strtonum(substr($3, 2))
+
+ handle_global_record(begin, size)
+ }
+
+ /^0x[0-9a-f]+ \[0x[0-9a-f]+\]: PERF_RECORD_/ {
+ begin = strtonum($1)
+ size = strtonum(substr($2, 2))
+
+ handle_global_record(begin, size)
+ }
+' file="$file" base="$base" dry_run="$dry_run"
diff --git a/script/test.bash b/script/test.bash
new file mode 100755
index 000000000000..c24abc74eaa1
--- /dev/null
+++ b/script/test.bash
@@ -0,0 +1,275 @@
+#! /bin/bash
+#
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# This script executes ptt tests and compares the output of tools, like
+# ptxed or ptdump, with the expected output from the ptt testfile.
+
+info() {
+ [[ $verbose != 0 ]] && echo -e "$@" >&2
+}
+
+run() {
+ info "$@"
+ "$@"
+}
+
+asm2addr() {
+ local line
+ line=`grep -i ^org "$1"`
+	[[ $? != 0 ]] && return 1
+ echo $line | sed "s/org *//"
+}
+
+usage() {
+ cat <<EOF
+usage: $0 [<options>] <pttfile>...
+
+options:
+ -h this text
+ -v print commands as they are executed
+ -c cpu[,cpu] comma-separated list of cpu's for the tests (see pttc -h, for valid values)
+ -f exit with 1 if any of the tests failed
+ -l only list .diff files
+ -g specify the pttc command (default: pttc)
+ -G specify additional arguments to pttc
+ -d specify the ptdump command (default: ptdump)
+ -D specify additional arguments to ptdump
+ -x specify the ptxed command (default: ptxed)
+ -X specify additional arguments to ptxed
+
+ <pttfile> annotated yasm file ending in .ptt
+EOF
+}
+
+pttc_cmd=pttc
+pttc_arg=""
+ptdump_cmd=ptdump
+ptdump_arg=""
+ptxed_cmd=ptxed
+ptxed_arg=""
+exit_fails=0
+list=0
+verbose=0
+while getopts "hvc:flg:G:d:D:x:X:" option; do
+ case $option in
+ h)
+ usage
+ exit 0
+ ;;
+ v)
+ verbose=1
+ ;;
+ c)
+ cpus=`echo $OPTARG | sed "s/,/ /g"`
+ ;;
+ f)
+ exit_fails=1
+ ;;
+ l)
+ list=1
+ ;;
+ g)
+ pttc_cmd=$OPTARG
+ ;;
+ G)
+ pttc_arg=$OPTARG
+ ;;
+ d)
+ ptdump_cmd=$OPTARG
+ ;;
+ D)
+ ptdump_arg=$OPTARG
+ ;;
+ x)
+ ptxed_cmd=$OPTARG
+ ;;
+ X)
+ ptxed_arg=$OPTARG
+ ;;
+ \?)
+ exit 1
+ ;;
+ esac
+done
+
+shift $(($OPTIND-1))
+
+if [[ $# == 0 ]]; then
+ usage
+ exit 1
+fi
+
+# the exit status
+status=0
+
+ptt-ptdump-opts() {
+ sed -n 's/[ \t]*;[ \t]*opt:ptdump[ \t][ \t]*\(.*\)[ \t]*/\1/p' "$1"
+}
+
+ptt-ptxed-opts() {
+ sed -n 's/[ \t]*;[ \t]*opt:ptxed[ \t][ \t]*\(.*\)[ \t]*/\1/p' "$1"
+}
+
+run-ptt-test() {
+ info "\n# run-ptt-test $@"
+
+ ptt="$1"
+ cpu="$2"
+ base=`basename "${ptt%%.ptt}"`
+
+ if [[ -n "$cpu" ]]; then
+ cpu="--cpu $cpu"
+ fi
+
+ # the following are the files that are generated by pttc
+ pt=$base.pt
+ bin=$base.bin
+ lst=$base.lst
+
+
+ # execute pttc - remove the extra \r in Windows line endings
+	files=`run "$pttc_cmd" $pttc_arg $cpu "$ptt" | sed 's/\r$//'`
+ ret=$?
+ if [[ $ret != 0 ]]; then
+ echo "$ptt: $pttc_cmd $pttc_arg failed with $ret" >&2
+ status=1
+ return
+ fi
+
+ exps=""
+ sb=""
+ for file in $files; do
+ case $file in
+ *.sb)
+ sb_base=${file%.sb}
+ sb_part=${sb_base#$base-}
+ sb_prefix=${sb_part%%,*}
+ sb_options=${sb_part#$sb_prefix}
+ sb_prio=${sb_prefix##*-}
+ sb_prefix2=${sb_prefix%-$sb_prio}
+ sb_format=${sb_prefix2##*-}
+
+ sb+=`echo $sb_options | sed -e "s/,/ --$sb_format:/g" -e "s/=/ /g"`
+ sb+=" --$sb_format:$sb_prio $file"
+ ;;
+ *.exp)
+ exps+=" $file"
+ ;;
+ *)
+ echo "$ptt: unexpected $pttc_cmd output '$file'"
+ status=1
+ continue
+ ;;
+ esac
+ done
+
+ if [[ -z $exps ]]; then
+ echo "$ptt: $pttc_cmd $pttc_arg did not produce any .exp file" >&2
+ status=1
+ return
+ fi
+
+	# loop over all .exp files, determine the tool, generate .out
+	# files, and compare the .exp and .out files with diff.
+	# differences are kept in .diff files and either printed or, with
+	# -l, only listed.
+ for exp in $exps; do
+ exp_base=${exp%%.exp}
+ out=$exp_base.out
+ diff=$exp_base.diff
+ tool=${exp_base##$base-}
+ tool=${tool%%-cpu_*}
+ case $tool in
+ ptxed)
+ addr=`asm2addr "$ptt"`
+ if [[ $? != 0 ]]; then
+ echo "$ptt: org directive not found in test file" >&2
+ status=1
+ continue
+ fi
+ local opts=`ptt-ptxed-opts "$ptt"`
+ opts+=" --no-inst --check"
+ run "$ptxed_cmd" $ptxed_arg --raw $bin:$addr $cpu $opts --pt $pt $sb > $out
+ ;;
+ ptdump)
+ local opts=`ptt-ptdump-opts "$ptt"`
+ run "$ptdump_cmd" $ptdump_arg $cpu $opts $sb $pt > $out
+ ;;
+ *)
+ echo "$ptt: unknown tool '$tool'"
+ status=1
+ continue
+ ;;
+ esac
+ if run diff -ub $exp $out > $diff; then
+ run rm $diff
+ else
+ if [[ $exit_fails != 0 ]]; then
+ status=1
+ fi
+
+ if [[ $list != 0 ]]; then
+ echo $diff
+ else
+ cat $diff
+ fi
+ fi
+ done
+}
+
+ptt-cpus() {
+ sed -n 's/[ \t]*;[ \t]*cpu[ \t][ \t]*\(.*\)[ \t]*/\1/p' "$1"
+}
+
+run-ptt-tests() {
+ local ptt="$1"
+ local cpus=$cpus
+
+ # if no cpus are given on the command-line,
+ # use the cpu directives from the pttfile.
+ if [[ -z $cpus ]]; then
+ cpus=`ptt-cpus "$ptt"`
+ fi
+
+ # if there are no cpu directives in the pttfile,
+ # run the test without any cpu settings.
+ if [[ -z $cpus ]]; then
+ run-ptt-test "$ptt"
+ return
+ fi
+
+ # otherwise run for each cpu the test.
+ for i in $cpus; do
+ run-ptt-test "$ptt" $i
+ done
+}
+
+for ptt in "$@"; do
+ run-ptt-tests "$ptt"
+done
+
+exit $status
diff --git a/sideband/CMakeLists.txt b/sideband/CMakeLists.txt
new file mode 100644
index 000000000000..ff183ab33805
--- /dev/null
+++ b/sideband/CMakeLists.txt
@@ -0,0 +1,65 @@
+# Copyright (c) 2017-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+include_directories(
+ internal/include
+)
+
+set(LIBSB_FILES
+ src/pt_sb_session.c
+ src/pt_sb_context.c
+ src/pt_sb_file.c
+ src/pt_sb_pevent.c
+)
+
+if (CMAKE_HOST_WIN32)
+ add_definitions(
+ # export libipt-sb symbols
+ #
+ /Dpt_sb_export=__declspec\(dllexport\)
+ )
+endif (CMAKE_HOST_WIN32)
+
+add_library(libipt-sb SHARED ${LIBSB_FILES})
+
+# put the version into the header
+#
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/include/libipt-sb.h.in
+ ${CMAKE_CURRENT_BINARY_DIR}/include/libipt-sb.h
+)
+
+set_target_properties(libipt-sb PROPERTIES
+ PREFIX ""
+ PUBLIC_HEADER ${CMAKE_CURRENT_BINARY_DIR}/include/libipt-sb.h
+ VERSION ${PT_VERSION}
+ SOVERSION ${PT_VERSION_MAJOR}
+)
+
+target_link_libraries(libipt-sb libipt)
+if (PEVENT)
+ target_link_libraries(libipt-sb pevent)
+endif (PEVENT)
diff --git a/sideband/include/libipt-sb.h.in b/sideband/include/libipt-sb.h.in
new file mode 100644
index 000000000000..79f421e76154
--- /dev/null
+++ b/sideband/include/libipt-sb.h.in
@@ -0,0 +1,530 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LIBIPT_SB_H
+#define LIBIPT_SB_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct pt_image_section_cache;
+struct pt_image;
+struct pt_event;
+
+
+/* A macro to mark functions as exported. */
+#ifndef pt_sb_export
+# if defined(__GNUC__)
+# define pt_sb_export __attribute__((visibility("default")))
+# elif defined(_MSC_VER)
+# define pt_sb_export __declspec(dllimport)
+# else
+# error "unknown compiler"
+# endif
+#endif
+
+
+/** The header version. */
+#define LIBIPT_SB_VERSION_MAJOR ${PT_VERSION_MAJOR}
+#define LIBIPT_SB_VERSION_MINOR ${PT_VERSION_MINOR}
+
+#define LIBIPT_SB_VERSION ((LIBIPT_SB_VERSION_MAJOR << 8) + \
+ LIBIPT_SB_VERSION_MINOR)
+
+
+/* Sideband decode errors and warnings. */
+enum pt_sb_error_code {
+ /* No error. Everything is OK. */
+ ptse_ok,
+
+ /* Sideband records have been lost. */
+ ptse_lost,
+
+ /* Trace has been lost. */
+ ptse_trace_lost,
+
+ /* An image section has been lost (ignored). */
+ ptse_section_lost
+};
+
+/** Return a human readable error string. */
+extern pt_sb_export const char *pt_sb_errstr(enum pt_sb_error_code);
+
+
+/* An Intel(R) Processor Trace (Intel PT) sideband tracing session.
+ *
+ * The session serves one Intel PT decoder.
+ *
+ * It is not thread-safe. It doesn't need to be. If a trace stream is decoded
+ * by multiple decoders in parallel, each decoder needs its own sideband tracing
+ * session since each decoder will be looking at the trace at a different point
+ * in time and may see a different memory image.
+ *
+ * A sideband tracing session contains all sideband decoders that are relevant
+ * for that trace stream. We distinguish primary and secondary sideband
+ * channels:
+ *
+ * - primary sideband channels affect decode directly.
+ *
+ * They actively change the Intel PT decoder's memory image on context
+ * switch sideband records.
+ *
+ * For per-cpu trace decode, for example, the sideband channel of the cpu
+ * for which trace is being decoded is a primary sideband channel.
+ *
+ * - secondary sideband channels affect decode indirectly.
+ *
+ * They maintain the memory image for different process contexts but do not
+ * actively switch the Intel PT decoder's memory image. They typically
+ * ignore context switch sideband records.
+ *
+ * They may still directly affect the Intel PT decoder's memory image by
+ * adding new sections while trace in that context is being decoded.
+ *
+ * For per-cpu trace decode, for example, the sideband channels of other
+ * cpus are secondary sideband channels.
+ */
+struct pt_sb_session;
+
+/* Allocate a tracing session.
+ *
+ * If @iscache is not NULL, it will be used for allocating new image sections.
+ *
+ * It is highly recommended to use an image section cache and to use the same
+ * cache for related tracing sessions, e.g. for all cpus in a per-cpu trace.
+ *
+ * Returns a pointer to the new tracing session or NULL if out of memory.
+ */
+extern pt_sb_export struct pt_sb_session *
+pt_sb_alloc(struct pt_image_section_cache *iscache);
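
As an illustrative sketch (not part of this patch), a caller could pair the session with a shared image section cache as recommended above; pt_iscache_alloc() and pt_iscache_free() are the corresponding libipt calls, and the helper name is made up:

```c
#include "intel-pt.h"
#include "libipt-sb.h"

/* Sketch: allocate one image section cache and one sideband session.
 * Related sessions, e.g. one per cpu, would share the same cache.
 */
static struct pt_sb_session *
alloc_session(struct pt_image_section_cache **piscache)
{
	struct pt_image_section_cache *iscache;
	struct pt_sb_session *session;

	iscache = pt_iscache_alloc(NULL);
	if (!iscache)
		return NULL;

	session = pt_sb_alloc(iscache);
	if (!session) {
		pt_iscache_free(iscache);
		return NULL;
	}

	*piscache = iscache;
	return session;
}
```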
+
+/* Free a tracing session.
+ *
+ * Also frees all sideband decoders and memory images contained in @session.
+ */
+extern pt_sb_export void pt_sb_free(struct pt_sb_session *session);
+
+/* Get the image section cache.
+ *
+ * Returns @session's image section cache provided at pt_sb_alloc().
+ */
+extern pt_sb_export struct pt_image_section_cache *
+pt_sb_iscache(struct pt_sb_session *session);
+
+/* Get the kernel image.
+ *
+ * Returns a non-NULL image for the Operating System in @session.
+ *
+ * It is not clear, yet, how virtualization will be handled.
+ *
+ * The returned image will be freed when @session is freed with a call to
+ * pt_sb_free().
+ */
+extern pt_sb_export struct pt_image *
+pt_sb_kernel_image(struct pt_sb_session *session);
+
+/* A sideband decode error/warning notifier.
+ *
+ * It will be called by sideband decoders to report @errcode encountered while
+ * processing sideband at @offset in @filename. Fatal errors will further cause
+ * the sideband decoder to be removed. Non-fatal errors and warnings will
+ * otherwise be ignored.
+ *
+ * Positive @errcode numbers are enum pt_sb_error_code constants.
+ * Negative @errcode numbers are enum pt_error_code constants.
+ *
+ * It shall return zero on success, a negative pt_error_code otherwise.
+ */
+typedef int (pt_sb_error_notifier_t)(int errcode, const char *filename,
+ uint64_t offset, void *priv);
+
+/* Install an error notifier.
+ *
+ * If @notifier is not NULL, will be called on errors and warnings encountered
+ * by sideband decoders.
+ *
+ * Returns the previously installed notifier or NULL.
+ */
+extern pt_sb_export pt_sb_error_notifier_t *
+pt_sb_notify_error(struct pt_sb_session *session,
+ pt_sb_error_notifier_t *notifier, void *priv);
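
For illustration, a notifier could simply log the error and continue; the split between negative libipt codes and positive sideband codes follows the comment above, and the function name is made up. It would be installed with pt_sb_notify_error(session, log_sb_error, NULL).

```c
#include "intel-pt.h"
#include "libipt-sb.h"

#include <inttypes.h>
#include <stdio.h>

/* Sketch: log sideband errors and warnings, keep decoding. */
static int log_sb_error(int errcode, const char *filename, uint64_t offset,
			void *priv)
{
	(void) priv;

	fprintf(stderr, "warning: %s:0x%" PRIx64 ": %s\n",
		filename ? filename : "<unknown>", offset,
		(errcode < 0)
		? pt_errstr(pt_errcode(errcode))
		: pt_sb_errstr((enum pt_sb_error_code) errcode));

	return 0;
}
```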
+
+/* Initialize newly added decoders.
+ *
+ * Initialize decoders that have been added since pt_sb_alloc() or since the
+ * last pt_sb_init_decoders() call by fetching their first sideband record.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int pt_sb_init_decoders(struct pt_sb_session *session);
+
+/* Apply an event to all sideband decoders contained in a session.
+ *
+ * Applies @event to all decoders in @session. This may involve a series of
+ * @apply and subsequent @fetch calls. See comments on @apply and @fetch for
+ * details.
+ *
+ * Decoders that return an error will be removed from @session and freed.
+ *
+ * Primary decoders are offered @image and may change it to point to a new
+ * memory image.
+ *
+ * For debugging purposes, decoders are also asked to @print the current record
+ * to @stream according to @flags. Pass a NULL @stream to ask decoders to not
+ * print anything.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int pt_sb_event(struct pt_sb_session *session,
+ struct pt_image **image,
+ const struct pt_event *event, size_t size,
+ FILE *stream, uint32_t flags);
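
A rough sketch of the intended call sequence, here with a query decoder as the event source (ptxed drives this from its instruction and block decoders in the same way); everything except the declared API is a placeholder:

```c
#include "intel-pt.h"
#include "libipt-sb.h"

/* Sketch: forward a query decoder's events to the sideband session.
 * A NULL stream asks the sideband decoders not to print anything.
 */
static int forward_events(struct pt_query_decoder *decoder,
			  struct pt_sb_session *session,
			  struct pt_image *image)
{
	for (;;) {
		struct pt_event event;
		int status, errcode;

		/* Returns a negative pt_error_code at the end of the trace. */
		status = pt_qry_event(decoder, &event, sizeof(event));
		if (status < 0)
			return status;

		/* Primary decoders may replace @image on context switches;
		 * a full decoder would then install the new image in its
		 * instruction or block decoder.
		 */
		errcode = pt_sb_event(session, &image, &event, sizeof(event),
				      NULL, 0);
		if (errcode < 0)
			return errcode;
	}
}
```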
+
+/* Dump sideband records up to a given timestamp.
+ *
+ * Asks all sideband decoders in @session to @print their current record to
+ * @stream according to @flags and @fetch the next record as long as the current
+ * record's timestamp is smaller or equal to @tsc.
+ *
+ * Decoders that return an error will be removed from @session and freed.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int pt_sb_dump(struct pt_sb_session *session, FILE *stream,
+ uint32_t flags, uint64_t tsc);
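
For example, a hypothetical caller might print pending sideband in compact form up to the current timestamp, e.g. to see what the sideband channels reported across a gap in the trace:

```c
#include <stdio.h>

#include "libipt-sb.h"

/* Sketch: dump sideband records up to @tsc with compact formatting. */
static int dump_pending_sideband(struct pt_sb_session *session, uint64_t tsc)
{
	return pt_sb_dump(session, stdout, ptsbp_compact | ptsbp_tsc, tsc);
}
```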
+
+
+/* A process context.
+ *
+ * We maintain a separate image per process so we can switch between them
+ * easily. Each image contains both user-space and kernel-space.
+ *
+ * Image sections are shared between processes using an image section cache.
+ *
+ * Process contexts are not thread-safe. The process memory image changes over
+ * time depending on sideband information. Sections of trace between process
+ * image changes can be decoded in parallel but threads will need to synchronize
+ * across process image changes.
+ */
+struct pt_sb_context;
+
+/* Get a context reference.
+ *
+ * Increment @context's use count.
+ */
+extern pt_sb_export int pt_sb_ctx_get(struct pt_sb_context *context);
+
+/* Put a context reference.
+ *
+ * Decrement @context's use count and free @context when it reaches zero.
+ */
+extern pt_sb_export int pt_sb_ctx_put(struct pt_sb_context *context);
+
+/* Get the context's memory image.
+ *
+ * The caller must hold a reference to @context as long as the image is used.
+ *
+ * Returns a non-NULL memory image for @context.
+ */
+extern pt_sb_export struct pt_image *
+pt_sb_ctx_image(const struct pt_sb_context *context);
+
+/* Map a file section into a context's image.
+ *
+ * Adds a section of @size bytes from @filename starting at @offset to @context's
+ * image at @vaddr.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int pt_sb_ctx_mmap(struct pt_sb_session *session,
+ struct pt_sb_context *context,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t vaddr);
+
+/* Switch to context's image.
+ *
+ * Install @context->image in @image. The caller is responsible for holding a
+ * reference to @context as long as its image is in use.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int
+pt_sb_ctx_switch_to(struct pt_image **image, struct pt_sb_session *session,
+ const struct pt_sb_context *context);
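
For illustration, a sideband decoder might populate and activate a process context along these lines; the path, offsets, load address, and helper name are hypothetical placeholders:

```c
#include "libipt-sb.h"

/* Sketch: map one executable section into the context of @pid and
 * switch decode to that context.
 */
static int switch_to_pid(struct pt_image **image,
			 struct pt_sb_session *session, uint32_t pid)
{
	struct pt_sb_context *context;
	int errcode;

	/* Declared further down in this header. */
	errcode = pt_sb_get_context_by_pid(&context, session, pid);
	if (errcode < 0)
		return errcode;

	/* Placeholder values, e.g. taken from an MMAP sideband record. */
	errcode = pt_sb_ctx_mmap(session, context, "/usr/bin/example",
				 0x0ull, 0x1000ull, 0x400000ull);
	if (errcode < 0)
		return errcode;

	return pt_sb_ctx_switch_to(image, session, context);
}
```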
+
+/* A context switch notifier.
+ *
+ * It shall return zero on success, a negative pt_error_code otherwise.
+ */
+typedef int (pt_sb_ctx_switch_notifier_t)(const struct pt_sb_context *,
+ void *priv);
+
+/* Install a context-switch notifier.
+ *
+ * If @notifier is not NULL, will be called with the switched-to context on a
+ * context switch via pt_sb_ctx_switch_to().
+ *
+ * Returns the previously installed notifier or NULL.
+ */
+extern pt_sb_export pt_sb_ctx_switch_notifier_t *
+pt_sb_notify_switch(struct pt_sb_session *session,
+ pt_sb_ctx_switch_notifier_t *notifier, void *priv);
+
+/* Get the context for pid.
+ *
+ * Provide a non-NULL process context for @pid in @context. This may create a
+ * new context if no context for @pid exists in @session. The new context is
+ * populated with kernel image sections.
+ *
+ * This does not provide a new reference to @context. Use pt_sb_ctx_get() if
+ * you need to keep the context.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int
+pt_sb_get_context_by_pid(struct pt_sb_context **context,
+ struct pt_sb_session *session, uint32_t pid);
+
+/* Find a context by pid.
+ *
+ * Provide a non-NULL process context for @pid in @context if it exists in
+ * @session. This does not provide a new reference to @context. Use
+ * pt_sb_ctx_get() if you need to keep the context.
+ *
+ * Provide a NULL process context in @context if a context for @pid does not
+ * exist in @session.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int
+pt_sb_find_context_by_pid(struct pt_sb_context **context,
+ struct pt_sb_session *session, uint32_t pid);
+
+/* Remove a context.
+ *
+ * Removes @context from @session and puts @session's reference to @context.
+ * Future lookups won't find @context but it won't be freed until the last user
+ * puts it.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int pt_sb_remove_context(struct pt_sb_session *session,
+ struct pt_sb_context *context);
+
+
+/* A collection of print options. */
+enum pt_sb_print_flag {
+ /* Print sideband records in compact mode. */
+ ptsbp_compact = 1 << 0,
+
+ /* Print sideband records in verbose mode. */
+ ptsbp_verbose = 1 << 1,
+
+ /* Print the sideband filename. */
+ ptsbp_filename = 1 << 2,
+
+ /* Print the offset into the sideband file. */
+ ptsbp_file_offset = 1 << 3,
+
+ /* Print the sideband record's timestamp. */
+ ptsbp_tsc = 1 << 4
+
+};
+
+/* An Intel PT sideband decoder configuration. */
+struct pt_sb_decoder_config {
+ /* The size of the config structure in bytes. */
+ size_t size;
+
+ /* Fetch the next sideband record and provide its timestamp.
+ *
+ * Return zero on success, a negative error code otherwise.
+ */
+ int (*fetch)(struct pt_sb_session *session, uint64_t *tsc, void *priv);
+
+ /* Apply the current sideband record.
+ *
+	 * For primary sideband channels, @image will be non-NULL and point to
+ * the image object that is currently used. If the image shall be
+ * switched, set @image to the new image to be used.
+ *
+ * For secondary sideband channels, @image will be NULL.
+ *
+ * The @event argument points to a pt_event object. Unknown event types
+ * shall be ignored.
+ *
+ * Initially, it will be passed to sideband decoders in the order of
+ * their next record's timestamp. It must only be applied to the
+ * current sideband record.
+ *
+ * If the record's timestamp is smaller or equal to the event's
+ * timestamp, @fetch will be called to fetch the next sideband record,
+ * and @apply will be called again for the new sideband record with the
+ * same @event.
+ *
+ * This repeats until the @event's timestamp is smaller than the current
+ * record's timestamp.
+ *
+ * The event will then be passed to all sideband decoders irrespective
+ * of their next record's timestamp. This allows sideband decoders to
+ * postpone actions until a suitable event.
+ *
+ * Return zero on success, a negative error code otherwise.
+ */
+ int (*apply)(struct pt_sb_session *session, struct pt_image **image,
+ const struct pt_event *event, void *priv);
+
+ /* Print the current sideband record.
+ *
+ * The output shall be determined based on @flags, which is a bit-vector
+ * of enum pt_sb_print_flag. A value of zero means that only errors
+ * shall be printed.
+ *
+ * Return zero on success, a negative error code otherwise.
+ */
+ int (*print)(struct pt_sb_session *session, FILE *stream,
+ uint32_t flags, void *priv);
+
+ /* Destroy the private data. */
+ void (*dtor)(void *priv);
+
+ /* Decoder-specific private data. */
+ void *priv;
+
+ /* A collection of configuration flags saying:
+ *
+ * - whether this is a primary decoder (secondary if clear).
+ */
+ uint32_t primary:1;
+};
+
+/* Add an Intel PT sideband decoder.
+ *
+ * Allocate a new sideband decoder based on @config and add it to @session.
+ *
+ * The sideband decoder will automatically be freed when @session is freed with
+ * a call to pt_sb_free() or when it is removed from @session after returning an
+ * error from one of its callback functions.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+extern pt_sb_export int
+pt_sb_alloc_decoder(struct pt_sb_session *session,
+ const struct pt_sb_decoder_config *config);
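
A minimal registration sketch for a custom sideband format, following the fetch/apply/print contract documented in pt_sb_decoder_config above; the my_* callbacks are hypothetical stubs that a real decoder would fill in:

```c
#include <string.h>

#include "libipt-sb.h"

/* Hypothetical stubs; a real decoder would read its own record format
 * in fetch() and react to Intel PT events in apply().
 */
static int my_fetch(struct pt_sb_session *session, uint64_t *tsc, void *priv)
{
	(void) session;
	(void) priv;

	*tsc = 0ull;
	return 0;
}

static int my_apply(struct pt_sb_session *session, struct pt_image **image,
		    const struct pt_event *event, void *priv)
{
	(void) session;
	(void) image;
	(void) event;
	(void) priv;

	return 0;
}

static int my_print(struct pt_sb_session *session, FILE *stream,
		    uint32_t flags, void *priv)
{
	(void) session;
	(void) stream;
	(void) flags;
	(void) priv;

	return 0;
}

static void my_dtor(void *priv)
{
	(void) priv;
}

/* Sketch: register the callbacks as a primary sideband decoder. */
static int add_my_decoder(struct pt_sb_session *session, void *priv)
{
	struct pt_sb_decoder_config config;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.fetch = my_fetch;
	config.apply = my_apply;
	config.print = my_print;
	config.dtor = my_dtor;
	config.priv = priv;
	config.primary = 1;

	return pt_sb_alloc_decoder(session, &config);
}
```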
+
+
+/* The configuration for a Linux perf event sideband decoder. */
+struct pt_sb_pevent_config {
+ /* The size of the config structure in bytes. */
+ size_t size;
+
+ /* The name of the file containing the sideband data. */
+ const char *filename;
+
+ /* The offset into the file from which to start reading. */
+ size_t begin;
+
+ /* The optional end offset into the file at which to stop reading.
+ *
+ * Zero means read until the end of the file.
+ */
+ size_t end;
+
+ /* The optional system root directory.
+ *
+ * If not NULL, this is prepended to every filename referenced in perf
+ * event sideband records.
+ */
+ const char *sysroot;
+
+ /* The optional 64-bit vdso. */
+ const char *vdso_x64;
+
+ /* The optional x32 vdso. */
+ const char *vdso_x32;
+
+ /* The optional 32-bit vdso. */
+ const char *vdso_ia32;
+
+ /* An offset to be subtracted from every perf event record timestamp.
+ *
+ * This applies perf event records a little bit earlier to compensate
+ * for too coarse timing.
+ */
+ uint64_t tsc_offset;
+
+ /* The respective field in struct perf_event_attr.
+ *
+ * We require sample_id_all in struct perf_event_attr to be set.
+ */
+ uint64_t sample_type;
+
+ /* The start address of the kernel.
+ *
+ * This is used to distinguish kernel from user addresses:
+ *
+ * kernel >= @kernel_start
+ * user < @kernel_start
+ *
+ * Set to UINT64_MAX if ring-0 is not being traced.
+ */
+ uint64_t kernel_start;
+
+ /* The respective fields in struct perf_event_mmap_page. */
+ uint16_t time_shift;
+ uint32_t time_mult;
+ uint64_t time_zero;
+
+ /* A collection of configuration flags saying:
+ *
+ * - whether this is a primary decoder (secondary if clear).
+ */
+ uint32_t primary:1;
+};
+
+/* Allocate a Linux perf event sideband decoder.
+ *
+ * Allocates a sideband decoder for the Linux perf event format based on @config
+ * and adds it to @session.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern pt_sb_export int
+pt_sb_alloc_pevent_decoder(struct pt_sb_session *session,
+ const struct pt_sb_pevent_config *config);
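
A configuration sketch for one primary per-cpu channel; the file name and numeric values are placeholders of the kind produced by perf-read-sideband.bash and perf-get-opts.bash elsewhere in this patch (0x10086 corresponds to TID|TIME|CPU|IDENTIFIER in the script's sample-type mapping):

```c
#include <stdint.h>
#include <string.h>

#include "libipt-sb.h"

/* Sketch: add one primary Linux perf event sideband channel.  All
 * values are placeholders for what the perf scripts would report.
 */
static int add_pevent_channel(struct pt_sb_session *session)
{
	struct pt_sb_pevent_config config;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.filename = "perf.data-sideband-cpu0.pevent";
	config.sample_type = 0x10086ull;	/* TID | TIME | CPU | IDENTIFIER */
	config.time_shift = 10;
	config.time_mult = 642;
	config.time_zero = 0ull;
	config.kernel_start = UINT64_MAX;	/* ring-0 not traced */
	config.primary = 1;

	return pt_sb_alloc_pevent_decoder(session, &config);
}
```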
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LIBIPT_SB_H */
diff --git a/sideband/internal/include/pt_sb_context.h b/sideband/internal/include/pt_sb_context.h
new file mode 100644
index 000000000000..f65b0a470af5
--- /dev/null
+++ b/sideband/internal/include/pt_sb_context.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SB_CONTEXT_H
+#define PT_SB_CONTEXT_H
+
+#include <stdint.h>
+
+struct pt_image;
+
+
+/* The ABI of the process. */
+enum pt_sb_abi {
+ pt_sb_abi_unknown,
+ pt_sb_abi_x64,
+ pt_sb_abi_x32,
+ pt_sb_abi_ia32
+};
+
+struct pt_sb_context {
+ /* The next context in a linear list of process contexts.
+ *
+ * I do not expect more than a few processes per tracing session. And
+ * if we had many processes, we'd also have trace spanning many context
+ * switches and sideband decode won't be the bottleneck.
+ *
+ * This field is owned by the sideband tracing session to which this
+ * context belongs.
+ */
+ struct pt_sb_context *next;
+
+ /* The memory image of that process.
+ *
+ * You must hold a reference to this context as long as @image is used.
+ */
+ struct pt_image *image;
+
+ /* The ABI of the process.
+ *
+ * This may be relevant for some but not all sideband formats.
+ *
+ * This field is collectively owned by all sideband decoders.
+ */
+ enum pt_sb_abi abi;
+
+ /* We identify processes by their process id.
+ *
+ * Intel PT provides CR3 and VMCS Base to identify address-spaces and
+ * notifies us about changes. But at least on Linux, we don't get the
+ * CR3 and all sideband records refer to pid/tid, so we're using those.
+ */
+ uint32_t pid;
+
+ /* The number of current users.
+ *
+ * We remove a context when the process exits but we keep the context
+ * object and its image alive as long as they are used.
+ */
+ uint16_t ucount;
+};
+
+/* Allocate a context.
+ *
+ * Allocate a context and an image. The optional @name argument is given to the
+ * context's image.
+ *
+ * The context's use-count is initialized to one. Use pt_sb_ctx_put() to free
+ * the returned context and its image.
+ *
+ * Returns a non-NULL context or NULL when out of memory.
+ */
+extern struct pt_sb_context *pt_sb_ctx_alloc(const char *name);
+
+#endif /* PT_SB_CONTEXT_H */
diff --git a/sideband/internal/include/pt_sb_decoder.h b/sideband/internal/include/pt_sb_decoder.h
new file mode 100644
index 000000000000..9a2cde451732
--- /dev/null
+++ b/sideband/internal/include/pt_sb_decoder.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SB_DECODER_H
+#define PT_SB_DECODER_H
+
+#include <stdint.h>
+#include <stdio.h>
+
+
+/* An Intel PT sideband decoder. */
+struct pt_sb_decoder {
+ /* The next Intel PT sideband decoder in a linear list of Intel PT
+ * sideband decoders ordered by @tsc (ascending).
+ */
+ struct pt_sb_decoder *next;
+
+ /* The timestamp of the next sideband record. */
+ uint64_t tsc;
+
+ /* Decoder functions provided by the decoder supplier:
+ *
+ * - fetch the next sideband record.
+ */
+ int (*fetch)(struct pt_sb_session *session, uint64_t *tsc, void *priv);
+
+ /* - apply the current sideband record. */
+ int (*apply)(struct pt_sb_session *session, struct pt_image **image,
+ const struct pt_event *event, void *priv);
+
+ /* - print the current sideband record. */
+ int (*print)(struct pt_sb_session *session, FILE *stream,
+ uint32_t flags, void *priv);
+
+ /* - destroy the decoder's private data. */
+ void (*dtor)(void *priv);
+
+ /* Decoder-specific private data. */
+ void *priv;
+
+ /* A flag saying whether this is a primary or secondary decoder. */
+ uint32_t primary:1;
+};
+
+#endif /* PT_SB_DECODER_H */
diff --git a/sideband/internal/include/pt_sb_file.h b/sideband/internal/include/pt_sb_file.h
new file mode 100644
index 000000000000..d97520c10a83
--- /dev/null
+++ b/sideband/internal/include/pt_sb_file.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SB_FILE_H
+#define PT_SB_FILE_H
+
+#include <stddef.h>
+
+
+/* Load a file section.
+ *
+ * Allocates a large enough buffer and copies the contents of @file from @begin
+ * to @end into it. If @end is zero, reads from @begin until the end of @file.
+ *
+ * On success, provides the buffer in @buffer and its size in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_sb_file_load(void **buffer, size_t *size, const char *filename,
+ size_t begin, size_t end);
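
For illustration only, a caller of this internal helper might look as follows; the file name is a placeholder, and releasing the buffer with free() is an assumption based on the contract above:

```c
#include <stdlib.h>

/* Sketch: load a whole sideband file into memory and release it again. */
static int load_whole_file(const char *filename)
{
	void *buffer;
	size_t size;
	int errcode;

	/* An @end of zero reads from @begin to the end of the file. */
	errcode = pt_sb_file_load(&buffer, &size, filename, 0, 0);
	if (errcode < 0)
		return errcode;

	/* ... process @size bytes at @buffer ... */

	free(buffer);
	return 0;
}
```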
+
+#endif /* PT_SB_FILE_H */
diff --git a/sideband/internal/include/pt_sb_pevent.h b/sideband/internal/include/pt_sb_pevent.h
new file mode 100644
index 000000000000..ed767be15c73
--- /dev/null
+++ b/sideband/internal/include/pt_sb_pevent.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SB_PEVENT_H
+#define PT_SB_PEVENT_H
+
+#include "pevent.h"
+
+
+/* The estimated code location. */
+enum pt_sb_pevent_loc {
+ /* We do not know where we are. */
+ ploc_unknown,
+
+ /* We are in kernel space. */
+ ploc_in_kernel,
+
+ /* We are in user space. */
+ ploc_in_user,
+
+ /* We are likely in kernel space. */
+ ploc_likely_in_kernel,
+
+ /* We are likely in user space. */
+ ploc_likely_in_user
+};
+
+/* A Linux perf event decoder's private data. */
+struct pt_sb_pevent_priv {
+ /* The sideband filename for printing.
+ *
+ * This is a copy of the filename provided by the user when allocating
+ * the sideband decoder.
+ */
+ char *filename;
+
+ /* The optional system root directory.
+ *
+ * If not NULL, this is prepended to every filename referenced in perf
+ * event sideband records.
+ *
+ * This is a copy of the sysroot provided by the user when allocating
+ * the sideband decoder.
+ */
+ char *sysroot;
+
+ /* The optional 64-bit vdso.
+ *
+ * If not NULL, this is used for [vdso] mmaps in 64-bit processes.
+ *
+ * This is a copy of the vdso filename provided by the user when
+ * allocating the sideband decoder.
+ */
+ char *vdso_x64;
+
+ /* The optional x32 vdso.
+ *
+ * If not NULL, this is used for [vdso] mmaps in x32 processes.
+ *
+ * This is a copy of the vdso filename provided by the user when
+ * allocating the sideband decoder.
+ */
+ char *vdso_x32;
+
+ /* The optional 32-bit vdso.
+ *
+ * If not NULL, this is used for [vdso] mmaps in 32-bit processes.
+ *
+ * This is a copy of the vdso filename provided by the user when
+ * allocating the sideband decoder.
+ */
+ char *vdso_ia32;
+
+ /* The begin and end of the sideband data in memory. */
+ uint8_t *begin, *end;
+
+ /* The position of the current and the next record in the sideband
+ * buffer.
+ *
+ * The current position is the position of @event or NULL.
+	 * The next position is the position from which to fetch.
+ */
+ const uint8_t *current, *next;
+
+ /* The libpevent configuration. */
+ struct pev_config pev;
+
+ /* The current perf event record. */
+ struct pev_event event;
+
+ /* The current process context.
+ *
+ * This is NULL if there is no current context.
+ * Otherwise, holds a reference to @context (put after use).
+ */
+ struct pt_sb_context *context;
+
+ /* The next process context.
+ *
+ * This is NULL if we're not waiting to switch contexts.
+ * Otherwise, holds a reference to @context (put after use).
+ */
+ struct pt_sb_context *next_context;
+
+ /* The start address of the kernel.
+ *
+ * This is used to distinguish kernel from user addresses:
+ *
+ * kernel >= @kernel_start
+ * user < @kernel_start
+ *
+ * This is only required when tracing ring-0.
+ */
+ uint64_t kernel_start;
+
+ /* An offset to be subtracted from every sideband record timestamp.
+ *
+	 * This causes sideband records to be applied a little earlier to
+	 * compensate for overly coarse timing.
+ */
+ uint64_t tsc_offset;
+
+ /* The current code location estimated from previous events. */
+ enum pt_sb_pevent_loc location;
+};
+
+extern int pt_sb_pevent_init(struct pt_sb_pevent_priv *priv,
+ const struct pt_sb_pevent_config *config);
+
+#endif /* PT_SB_PEVENT_H */
diff --git a/sideband/internal/include/pt_sb_session.h b/sideband/internal/include/pt_sb_session.h
new file mode 100644
index 000000000000..7c52e27bb0f9
--- /dev/null
+++ b/sideband/internal/include/pt_sb_session.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SB_SESSION_H
+#define PT_SB_SESSION_H
+
+#include "libipt-sb.h"
+
+struct pt_image_section_cache;
+struct pt_image;
+struct pt_sb_context;
+struct pt_sb_decoder;
+
+
+struct pt_sb_session {
+ /* The image section cache to use for new image sections.
+ *
+ * This allows sharing image sections across contexts.
+ */
+ struct pt_image_section_cache *iscache;
+
+ /* A linear list of contexts in no particular order. */
+ struct pt_sb_context *contexts;
+
+ /* The kernel memory image.
+ *
+ * Just like process images, the kernel image may change over time. It
+ * is used to populate new process images.
+ *
+ * This assumes that the full kernel is mapped into every process.
+ */
+ struct pt_image *kernel;
+
+ /* A list of sideband decoders ordered by their @tsc (ascending). */
+ struct pt_sb_decoder *decoders;
+
+ /* A list of newly added sideband decoders in no particular order.
+ *
+ * Use pt_sb_init_decoders() to fetch the first record and move them to
+ * @decoders.
+ */
+ struct pt_sb_decoder *waiting;
+
+ /* A list of retired sideband decoders in no particular order.
+ *
+ * They ran out of trace but might still have a postponed effect
+ * pending. We present events to them until @apply() returns -pte_eos.
+ */
+ struct pt_sb_decoder *retired;
+
+ /* A list of removed sideband decoders in no particular order.
+ *
+ * They wait for their destruction when the session is destroyed.
+ */
+ struct pt_sb_decoder *removed;
+
+ /* An optional callback function to be called on sideband decode errors
+ * and warnings.
+ */
+ pt_sb_error_notifier_t *notify_error;
+
+ /* The private data for the error notifier. */
+ void *priv_error;
+
+ /* An optional callback function to be called on context switches. */
+ pt_sb_ctx_switch_notifier_t *notify_switch_to;
+
+ /* The private data for the context switch notifier. */
+ void *priv_switch_to;
+};
+
+
+extern int pt_sb_error(const struct pt_sb_session *session, int errcode,
+ const char *filename, uint64_t offset);
+
+#endif /* PT_SB_SESSION_H */
diff --git a/sideband/src/pt_sb_context.c b/sideband/src/pt_sb_context.c
new file mode 100644
index 000000000000..32baa14ac118
--- /dev/null
+++ b/sideband/src/pt_sb_context.c
@@ -0,0 +1,162 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_sb_context.h"
+#include "pt_sb_session.h"
+
+#include "libipt-sb.h"
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+struct pt_sb_context *pt_sb_ctx_alloc(const char *name)
+{
+ struct pt_sb_context *context;
+ struct pt_image *image;
+
+ image = pt_image_alloc(name);
+ if (!image)
+ return NULL;
+
+ context = malloc(sizeof(*context));
+ if (!context) {
+ pt_image_free(image);
+ return NULL;
+ }
+
+ memset(context, 0, sizeof(*context));
+ context->image = image;
+ context->ucount = 1;
+
+ return context;
+}
+
+int pt_sb_ctx_get(struct pt_sb_context *context)
+{
+ uint16_t ucount;
+
+ if (!context)
+ return -pte_invalid;
+
+ ucount = context->ucount;
+ if (UINT16_MAX <= ucount)
+ return -pte_overflow;
+
+ context->ucount = ucount + 1;
+
+ return 0;
+}
+
+static void pt_sb_ctx_free(struct pt_sb_context *context)
+{
+ if (!context)
+ return;
+
+ pt_image_free(context->image);
+ free(context);
+}
+
+int pt_sb_ctx_put(struct pt_sb_context *context)
+{
+ uint16_t ucount;
+
+ if (!context)
+ return -pte_invalid;
+
+ ucount = context->ucount;
+ if (ucount > 1) {
+ context->ucount = ucount - 1;
+ return 0;
+ }
+
+ if (!ucount)
+ return -pte_internal;
+
+ pt_sb_ctx_free(context);
+
+ return 0;
+}
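To illustrate the reference counting implemented by pt_sb_ctx_get() and pt_sb_ctx_put(), here is a minimal sketch using only the functions defined above; the context name is a made-up example:

static int refcount_example(void)
{
	struct pt_sb_context *context;
	int errcode;

	context = pt_sb_ctx_alloc("pid-2a");	/* use count is 1 */
	if (!context)
		return -pte_nomem;

	errcode = pt_sb_ctx_get(context);	/* use count is 2 */
	if (errcode < 0) {
		(void) pt_sb_ctx_put(context);
		return errcode;
	}

	(void) pt_sb_ctx_put(context);		/* use count back to 1 */

	return pt_sb_ctx_put(context);		/* last put frees the context */
}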
+
+struct pt_image *pt_sb_ctx_image(const struct pt_sb_context *context)
+{
+ if (!context)
+ return NULL;
+
+ return context->image;
+}
+
+int pt_sb_ctx_mmap(struct pt_sb_session *session, struct pt_sb_context *context,
+ const char *filename, uint64_t offset, uint64_t size,
+ uint64_t vaddr)
+{
+ struct pt_image_section_cache *iscache;
+ struct pt_image *image;
+ int isid;
+
+ image = pt_sb_ctx_image(context);
+ if (!image)
+ return -pte_internal;
+
+ iscache = pt_sb_iscache(session);
+ if (!iscache)
+ return pt_image_add_file(image, filename, offset, size, NULL,
+ vaddr);
+
+ isid = pt_iscache_add_file(iscache, filename, offset, size, vaddr);
+ if (isid < 0)
+ return isid;
+
+ return pt_image_add_cached(image, iscache, isid, NULL);
+}
+
+int pt_sb_ctx_switch_to(struct pt_image **pimage, struct pt_sb_session *session,
+ const struct pt_sb_context *context)
+{
+ pt_sb_ctx_switch_notifier_t *notify_switch_to;
+ struct pt_image *image;
+ int errcode;
+
+ if (!pimage || !session)
+ return -pte_internal;
+
+ image = pt_sb_ctx_image(context);
+ if (!image)
+ return -pte_internal;
+
+ notify_switch_to = session->notify_switch_to;
+ if (notify_switch_to) {
+ errcode = notify_switch_to(context, session->priv_switch_to);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ *pimage = image;
+
+ return 0;
+}
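Based on the call site in pt_sb_ctx_switch_to() above, a context switch notifier receives the context being switched to and the private pointer stored in @priv_switch_to. A sketch of such a callback follows; how it is registered with the session is left to the public libipt-sb.h interface and not shown here:

#include <stdio.h>

/* Sketch: print the pid of the process we are switching to. The FILE
 * pointer is assumed to have been registered as the notifier's private
 * data. Returning a negative error code aborts the context switch.
 */
static int print_switch_to(const struct pt_sb_context *context, void *priv)
{
	FILE *stream;

	stream = (FILE *) priv;
	if (!stream || !context)
		return -pte_internal;

	fprintf(stream, "switching to pid-%x\n", context->pid);

	return 0;
}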
diff --git a/sideband/src/pt_sb_file.c b/sideband/src/pt_sb_file.c
new file mode 100644
index 000000000000..563632556df7
--- /dev/null
+++ b/sideband/src/pt_sb_file.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_sb_file.h"
+
+#include <stdint.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "intel-pt.h"
+
+
+int pt_sb_file_load(void **pbuffer, size_t *psize, const char *filename,
+ size_t begin, size_t end)
+{
+ size_t size;
+ FILE *file;
+ void *content;
+ long fsize, fbegin, fend;
+ int errcode;
+
+ if (!pbuffer || !psize || !filename)
+ return -pte_invalid;
+
+ if (end && end <= begin)
+ return -pte_invalid;
+
+ if (LONG_MAX < begin || LONG_MAX < end)
+ return -pte_invalid;
+
+ file = fopen(filename, "rb");
+ if (!file)
+ return -pte_bad_file;
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode)
+ goto out_file;
+
+ fsize = ftell(file);
+ if (fsize < 0)
+ goto out_file;
+
+ fbegin = (long) begin;
+ if (!end)
+ fend = fsize;
+ else {
+ fend = (long) end;
+ if (fsize < fend)
+ fend = fsize;
+ }
+
+ size = (size_t) (fend - fbegin);
+
+ errcode = fseek(file, fbegin, SEEK_SET);
+ if (errcode)
+ goto out_file;
+
+ content = malloc(size);
+ if (!content) {
+ fclose(file);
+ return -pte_nomem;
+ }
+
+ *psize = fread(content, 1, size, file);
+ *pbuffer = content;
+
+ fclose(file);
+ return 0;
+
+out_file:
+ fclose(file);
+ return -pte_bad_file;
+}
diff --git a/sideband/src/pt_sb_pevent.c b/sideband/src/pt_sb_pevent.c
new file mode 100644
index 000000000000..a0daa23dd0ea
--- /dev/null
+++ b/sideband/src/pt_sb_pevent.c
@@ -0,0 +1,1710 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "libipt-sb.h"
+
+#include "intel-pt.h"
+
+
+#ifndef FEATURE_PEVENT
+
+int pt_sb_alloc_pevent_decoder(struct pt_sb_session *session,
+ const struct pt_sb_pevent_config *config)
+{
+ (void) session;
+ (void) config;
+
+ return -pte_not_supported;
+}
+
+#else /* FEATURE_PEVENT */
+
+#include "pt_sb_pevent.h"
+#include "pt_sb_session.h"
+#include "pt_sb_context.h"
+#include "pt_sb_file.h"
+#include "pt_compiler.h"
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+# define snprintf _snprintf_c
+#endif
+
+
+#ifndef FEATURE_ELF
+
+static int elf_get_abi(FILE *file)
+{
+ if (!file)
+ return -pte_internal;
+
+ return pt_sb_abi_unknown;
+}
+
+#else /* FEATURE_ELF */
+
+#include <elf.h>
+
+
+static int elf_get_abi(FILE *file)
+{
+ uint8_t e_ident[EI_NIDENT];
+ size_t count;
+ int status;
+
+ if (!file)
+ return -pte_internal;
+
+ status = fseek(file, 0, SEEK_SET);
+ if (status < 0)
+ return pt_sb_abi_unknown;
+
+ count = fread(e_ident, sizeof(e_ident), 1, file);
+ if (count != 1)
+ return pt_sb_abi_unknown;
+
+ status = memcmp(e_ident, ELFMAG, SELFMAG);
+ if (status != 0)
+ return pt_sb_abi_unknown;
+
+ if (e_ident[EI_VERSION] != EV_CURRENT)
+ return pt_sb_abi_unknown;
+
+ switch (e_ident[EI_CLASS]) {
+ default:
+ break;
+
+ case ELFCLASS64:
+ return pt_sb_abi_x64;
+
+ case ELFCLASS32: {
+ Elf32_Ehdr ehdr;
+
+ status = fseek(file, 0, SEEK_SET);
+ if (status < 0)
+ break;
+
+ count = fread(&ehdr, sizeof(ehdr), 1, file);
+ if (count != 1)
+ break;
+
+ switch (ehdr.e_machine) {
+ default:
+ break;
+
+ case EM_386:
+ return pt_sb_abi_ia32;
+
+ case EM_X86_64:
+ return pt_sb_abi_x32;
+ }
+ }
+ break;
+ }
+
+ return pt_sb_abi_unknown;
+}
+
+#endif /* FEATURE_ELF */
+
+static int pt_sb_pevent_error(const struct pt_sb_session *session, int errcode,
+ const struct pt_sb_pevent_priv *priv)
+{
+ const char *filename;
+ uint64_t offset;
+
+ filename = NULL;
+ offset = 0ull;
+
+ if (priv) {
+ const uint8_t *pos;
+
+ pos = priv->current;
+ if (!pos)
+ pos = priv->next;
+
+ filename = priv->filename;
+ offset = (uint64_t) (pos - priv->begin);
+ }
+
+ return pt_sb_error(session, errcode, filename, offset);
+}
+
+static int pt_sb_pevent_track_abi(struct pt_sb_context *context,
+ const char *filename)
+{
+ FILE *file;
+ int abi;
+
+ if (!context || !filename)
+ return -pte_internal;
+
+ if (context->abi)
+ return 0;
+
+ file = fopen(filename, "rb");
+ if (!file)
+ return 0;
+
+ abi = elf_get_abi(file);
+
+ fclose(file);
+
+ if (abi < 0)
+ return abi;
+
+ context->abi = (enum pt_sb_abi) abi;
+
+ return 0;
+}
+
+static int pt_sb_pevent_find_vdso(const char **pvdso,
+ const struct pt_sb_pevent_priv *priv,
+ const struct pt_sb_context *context)
+{
+ const char *vdso;
+
+ if (!pvdso || !priv || !context)
+ return -pte_internal;
+
+ vdso = NULL;
+
+ switch (context->abi) {
+ case pt_sb_abi_unknown:
+ break;
+
+ case pt_sb_abi_x64:
+ vdso = priv->vdso_x64;
+ break;
+
+ case pt_sb_abi_x32:
+ vdso = priv->vdso_x32;
+ break;
+
+ case pt_sb_abi_ia32:
+ vdso = priv->vdso_ia32;
+ break;
+ }
+
+ if (!vdso)
+ return -pte_bad_config;
+
+ *pvdso = vdso;
+
+ return 0;
+}
+
+static void pt_sb_pevent_dtor(void *priv_arg)
+{
+ struct pt_sb_pevent_priv *priv;
+ struct pt_sb_context *context;
+
+ priv = (struct pt_sb_pevent_priv *) priv_arg;
+ if (!priv)
+ return;
+
+ context = priv->next_context;
+ if (context)
+ pt_sb_ctx_put(context);
+
+ context = priv->context;
+ if (context)
+ pt_sb_ctx_put(context);
+
+ free(priv->filename);
+ free(priv->sysroot);
+ free(priv->vdso_x64);
+ free(priv->vdso_x32);
+ free(priv->vdso_ia32);
+ free(priv->begin);
+ free(priv);
+}
+
+static int pt_sb_pevent_init_path(char **dst, const char *src)
+{
+ size_t len;
+ char *copy;
+
+ if (!dst)
+ return -pte_internal;
+
+ if (!src) {
+ *dst = NULL;
+ return 0;
+ }
+
+ len = strnlen(src, FILENAME_MAX);
+ if (len == FILENAME_MAX)
+ return -pte_invalid;
+
+ len += 1;
+ copy = malloc(len);
+ if (!copy)
+ return -pte_nomem;
+
+ memcpy(copy, src, len);
+
+ *dst = copy;
+
+ return 0;
+}
+
+int pt_sb_pevent_init(struct pt_sb_pevent_priv *priv,
+ const struct pt_sb_pevent_config *config)
+{
+ const char *filename;
+ size_t size;
+ void *buffer;
+ int errcode;
+
+ if (!priv || !config)
+ return -pte_internal;
+
+ /* This is the first version - we need all the fields. */
+ if (config->size < sizeof(*config))
+ return -pte_invalid;
+
+ filename = config->filename;
+ if (!filename)
+ return -pte_invalid;
+
+ buffer = NULL;
+ size = 0;
+ errcode = pt_sb_file_load(&buffer, &size, filename,
+ config->begin, config->end);
+ if (errcode < 0)
+ return errcode;
+
+ memset(priv, 0, sizeof(*priv));
+ priv->begin = (uint8_t *) buffer;
+ priv->end = (uint8_t *) buffer + size;
+ priv->next = (uint8_t *) buffer;
+
+ errcode = pt_sb_pevent_init_path(&priv->filename, filename);
+ if (errcode < 0) {
+ pt_sb_pevent_dtor(priv);
+ return errcode;
+ }
+
+ errcode = pt_sb_pevent_init_path(&priv->sysroot, config->sysroot);
+ if (errcode < 0) {
+ pt_sb_pevent_dtor(priv);
+ return errcode;
+ }
+
+ errcode = pt_sb_pevent_init_path(&priv->vdso_x64, config->vdso_x64);
+ if (errcode < 0) {
+ pt_sb_pevent_dtor(priv);
+ return errcode;
+ }
+
+ errcode = pt_sb_pevent_init_path(&priv->vdso_x32, config->vdso_x32);
+ if (errcode < 0) {
+ pt_sb_pevent_dtor(priv);
+ return errcode;
+ }
+
+ errcode = pt_sb_pevent_init_path(&priv->vdso_ia32, config->vdso_ia32);
+ if (errcode < 0) {
+ pt_sb_pevent_dtor(priv);
+ return errcode;
+ }
+
+ pev_config_init(&priv->pev);
+ priv->pev.sample_type = config->sample_type;
+ priv->pev.time_shift = config->time_shift;
+ priv->pev.time_mult = config->time_mult;
+ priv->pev.time_zero = config->time_zero;
+
+ priv->kernel_start = config->kernel_start;
+ priv->tsc_offset = config->tsc_offset;
+ priv->location = ploc_unknown;
+
+ return 0;
+}
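As a sketch of how a caller fills in the configuration consumed by pt_sb_pevent_init(), assuming the field names used above; the filename, kernel start address, and sample type are made-up example values that would normally come from the perf.data file and the recording setup:

static int config_example(struct pt_sb_pevent_priv *priv)
{
	struct pt_sb_pevent_config config;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);	/* checked against the struct version */
	config.filename = "perf.data-sideband-cpu0";
	config.begin = 0;		/* zero @begin/@end: load the whole file */
	config.end = 0;
	config.sample_type = 0x10086ull;	/* example PERF_SAMPLE_* mask */
	config.kernel_start = 0xffffffff81000000ull;
	config.tsc_offset = 0ull;

	return pt_sb_pevent_init(priv, &config);
}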
+
+static int pt_sb_pevent_fetch(uint64_t *ptsc, struct pt_sb_pevent_priv *priv)
+{
+ struct pev_event *event;
+ const uint8_t *pos;
+ uint64_t tsc, offset;
+ int size;
+
+ if (!ptsc || !priv)
+ return -pte_internal;
+
+ pos = priv->next;
+ event = &priv->event;
+
+ /* Consume the current record early so we get the offset right when
+ * diagnosing fetch errors.
+ */
+ priv->current = pos;
+
+ size = pev_read(event, pos, priv->end, &priv->pev);
+ if (size < 0)
+ return size;
+
+ priv->next = pos + size;
+
+ /* If we don't have a time sample, set @ptsc to zero to process the
+ * record immediately.
+ */
+ if (!event->sample.time) {
+ *ptsc = 0ull;
+ return 0;
+ }
+
+ /* Subtract a pre-defined offset to cause sideband events from this
+ * channel to be applied a little earlier.
+ *
+ * We don't want @tsc to wrap around when subtracting @offset, though.
+ * This would suddenly push the event very far out and essentially block
+ * this sideband channel.
+ *
+ * On the other hand, we want to allow 'negative' offsets. And for
+ * those, we want to avoid wrapping around in the other direction.
+ */
+ offset = priv->tsc_offset;
+ tsc = event->sample.tsc;
+ if (offset <= tsc)
+ tsc -= offset;
+ else {
+ if (0ll <= (int64_t) offset)
+ tsc = 0ull;
+ else {
+ if (tsc <= offset)
+ tsc -= offset;
+ else
+ tsc = UINT64_MAX;
+ }
+ }
+
+ /* We update the event record's timestamp, as well, so we will print the
+ * updated tsc and apply the event at the right time.
+ *
+ * Note that we only update our copy in @priv, not the sideband stream.
+ */
+ event->sample.tsc = tsc;
+ *ptsc = tsc;
+
+ return 0;
+}
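For illustration, the timestamp adjustment above can be restated as a standalone helper with a few worked examples (a sketch, not part of the library):

/* adjust_tsc(0x1000, 0x10)             == 0xff0
 * adjust_tsc(0x0008, 0x10)             == 0x0     (clamped at zero)
 * adjust_tsc(0x1000, (uint64_t) -0x10) == 0x1010  ('negative' offset adds)
 */
static uint64_t adjust_tsc(uint64_t tsc, uint64_t offset)
{
	if (offset <= tsc)
		return tsc - offset;

	if (0ll <= (int64_t) offset)
		return 0ull;

	return (tsc <= offset) ? tsc - offset : UINT64_MAX;
}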
+
+static int pt_sb_pevent_print_event(const struct pev_event *event,
+ FILE *stream, uint32_t flags)
+{
+ if (!event)
+ return -pte_internal;
+
+ switch (event->type) {
+ default:
+ if (flags & ptsbp_compact)
+ fprintf(stream, "UNKNOWN (%x, %x)", event->type,
+ event->misc);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "UNKNOWN");
+ fprintf(stream, "\n type: %x", event->type);
+ fprintf(stream, "\n misc: %x", event->misc);
+ }
+
+ break;
+
+ case PERF_RECORD_MMAP: {
+ const struct pev_record_mmap *mmap;
+
+ mmap = event->record.mmap;
+ if (!mmap)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_MMAP %x/%x, %" PRIx64
+ ", %" PRIx64 ", %" PRIx64 ", %s",
+ mmap->pid, mmap->tid, mmap->addr, mmap->len,
+ mmap->pgoff, mmap->filename);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_MMAP");
+ fprintf(stream, "\n pid: %x", mmap->pid);
+ fprintf(stream, "\n tid: %x", mmap->tid);
+ fprintf(stream, "\n addr: %" PRIx64, mmap->addr);
+ fprintf(stream, "\n len: %" PRIx64, mmap->len);
+ fprintf(stream, "\n pgoff: %" PRIx64, mmap->pgoff);
+ fprintf(stream, "\n filename: %s", mmap->filename);
+ }
+ }
+ break;
+
+ case PERF_RECORD_LOST: {
+ const struct pev_record_lost *lost;
+
+ lost = event->record.lost;
+ if (!lost)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_LOST %" PRIx64 ", %"
+ PRIx64, lost->id, lost->lost);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_LOST");
+ fprintf(stream, "\n id: %" PRIx64, lost->id);
+ fprintf(stream, "\n lost: %" PRIx64, lost->lost);
+ }
+ }
+ break;
+
+ case PERF_RECORD_COMM: {
+ const struct pev_record_comm *comm;
+ const char *sfx;
+
+ comm = event->record.comm;
+ if (!comm)
+ return -pte_bad_packet;
+
+ sfx = event->misc & PERF_RECORD_MISC_COMM_EXEC ? ".EXEC" : "";
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_COMM%s %x/%x, %s", sfx,
+ comm->pid, comm->tid, comm->comm);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_COMM%s", sfx);
+ fprintf(stream, "\n pid: %x", comm->pid);
+ fprintf(stream, "\n tid: %x", comm->tid);
+ fprintf(stream, "\n comm: %s", comm->comm);
+ }
+ }
+ break;
+
+ case PERF_RECORD_EXIT: {
+ const struct pev_record_exit *exit;
+
+ exit = event->record.exit;
+ if (!exit)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_EXIT %x/%x, %x/%x, %"
+ PRIx64, exit->pid, exit->tid, exit->ppid,
+ exit->ptid, exit->time);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_EXIT");
+ fprintf(stream, "\n pid: %x", exit->pid);
+ fprintf(stream, "\n ppid: %x", exit->ppid);
+ fprintf(stream, "\n tid: %x", exit->tid);
+ fprintf(stream, "\n ptid: %x", exit->ptid);
+ fprintf(stream, "\n time: %" PRIx64, exit->time);
+ }
+ }
+ break;
+
+ case PERF_RECORD_THROTTLE: {
+ const struct pev_record_throttle *throttle;
+
+ throttle = event->record.throttle;
+ if (!throttle)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_THROTTLE %" PRIx64 ", %"
+ PRIx64 ", %" PRIx64, throttle->time,
+ throttle->id, throttle->stream_id);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_THROTTLE");
+ fprintf(stream, "\n time: %" PRIx64, throttle->time);
+ fprintf(stream, "\n id: %" PRIx64, throttle->id);
+ fprintf(stream, "\n stream_id: %" PRIx64,
+ throttle->stream_id);
+ }
+ }
+ break;
+
+ case PERF_RECORD_UNTHROTTLE: {
+ const struct pev_record_throttle *throttle;
+
+ throttle = event->record.throttle;
+ if (!throttle)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_UNTHROTTLE %" PRIx64
+ ", %" PRIx64 ", %" PRIx64, throttle->time,
+ throttle->id, throttle->stream_id);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_UNTHROTTLE");
+ fprintf(stream, "\n time: %" PRIx64, throttle->time);
+ fprintf(stream, "\n id: %" PRIx64, throttle->id);
+ fprintf(stream, "\n stream_id: %" PRIx64,
+ throttle->stream_id);
+ }
+ }
+ break;
+
+ case PERF_RECORD_FORK: {
+ const struct pev_record_fork *fork;
+
+ fork = event->record.fork;
+ if (!fork)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_FORK %x/%x, %x/%x, %"
+ PRIx64, fork->pid, fork->tid, fork->ppid,
+ fork->ptid, fork->time);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_FORK");
+ fprintf(stream, "\n pid: %x", fork->pid);
+ fprintf(stream, "\n ppid: %x", fork->ppid);
+ fprintf(stream, "\n tid: %x", fork->tid);
+ fprintf(stream, "\n ptid: %x", fork->ptid);
+ fprintf(stream, "\n time: %" PRIx64, fork->time);
+ }
+ }
+ break;
+
+ case PERF_RECORD_MMAP2: {
+ const struct pev_record_mmap2 *mmap2;
+
+ mmap2 = event->record.mmap2;
+ if (!mmap2)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_MMAP2 %x/%x, %" PRIx64
+ ", %" PRIx64 ", %" PRIx64 ", %x, %x, %" PRIx64
+ ", %" PRIx64 ", %x, %x, %s", mmap2->pid,
+ mmap2->tid, mmap2->addr, mmap2->len,
+ mmap2->pgoff, mmap2->maj, mmap2->min,
+ mmap2->ino, mmap2->ino_generation, mmap2->prot,
+ mmap2->flags, mmap2->filename);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_MMAP2");
+ fprintf(stream, "\n pid: %x", mmap2->pid);
+ fprintf(stream, "\n tid: %x", mmap2->tid);
+ fprintf(stream, "\n addr: %" PRIx64, mmap2->addr);
+ fprintf(stream, "\n len: %" PRIx64, mmap2->len);
+ fprintf(stream, "\n pgoff: %" PRIx64, mmap2->pgoff);
+ fprintf(stream, "\n maj: %x", mmap2->maj);
+ fprintf(stream, "\n min: %x", mmap2->min);
+ fprintf(stream, "\n ino: %" PRIx64, mmap2->ino);
+ fprintf(stream, "\n ino_generation: %" PRIx64,
+ mmap2->ino_generation);
+ fprintf(stream, "\n prot: %x", mmap2->prot);
+ fprintf(stream, "\n flags: %x", mmap2->flags);
+ fprintf(stream, "\n filename: %s", mmap2->filename);
+ }
+ }
+ break;
+
+ case PERF_RECORD_AUX: {
+ const struct pev_record_aux *aux;
+ const char *sfx;
+
+ aux = event->record.aux;
+ if (!aux)
+ return -pte_bad_packet;
+
+ sfx = aux->flags & PERF_AUX_FLAG_TRUNCATED ? ".TRUNCATED" : "";
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_AUX%s %" PRIx64 ", %"
+ PRIx64 ", %" PRIx64, sfx, aux->aux_offset,
+ aux->aux_size, aux->flags);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_AUX%s", sfx);
+ fprintf(stream, "\n aux offset: %" PRIx64,
+ aux->aux_offset);
+ fprintf(stream, "\n aux size: %" PRIx64,
+ aux->aux_size);
+ fprintf(stream, "\n flags: %" PRIx64, aux->flags);
+ }
+ }
+ break;
+
+ case PERF_RECORD_ITRACE_START: {
+ const struct pev_record_itrace_start *itrace_start;
+
+ itrace_start = event->record.itrace_start;
+ if (!itrace_start)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_ITRACE_START %x/%x",
+ itrace_start->pid, itrace_start->tid);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_ITRACE_START");
+ fprintf(stream, "\n pid: %x", itrace_start->pid);
+ fprintf(stream, "\n tid: %x", itrace_start->tid);
+ }
+ }
+ break;
+
+ case PERF_RECORD_LOST_SAMPLES: {
+ const struct pev_record_lost_samples *lost_samples;
+
+ lost_samples = event->record.lost_samples;
+ if (!lost_samples)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_LOST_SAMPLES %" PRIx64,
+ lost_samples->lost);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_LOST_SAMPLES");
+ fprintf(stream, "\n lost: %" PRIx64,
+ lost_samples->lost);
+ }
+
+ }
+ break;
+
+ case PERF_RECORD_SWITCH: {
+ const char *sfx;
+
+ sfx = event->misc & PERF_RECORD_MISC_SWITCH_OUT ? "OUT" : "IN";
+
+ if (flags & (ptsbp_compact | ptsbp_verbose))
+ fprintf(stream, "PERF_RECORD_SWITCH.%s", sfx);
+ }
+ break;
+
+ case PERF_RECORD_SWITCH_CPU_WIDE: {
+ const struct pev_record_switch_cpu_wide *switch_cpu_wide;
+ const char *sfx, *pfx;
+
+ if (event->misc & PERF_RECORD_MISC_SWITCH_OUT) {
+ sfx = "OUT";
+ pfx = "next";
+ } else {
+ sfx = "IN";
+ pfx = "prev";
+ }
+
+ switch_cpu_wide = event->record.switch_cpu_wide;
+ if (!switch_cpu_wide)
+ return -pte_bad_packet;
+
+ if (flags & ptsbp_compact)
+ fprintf(stream, "PERF_RECORD_SWITCH_CPU_WIDE.%s %x/%x",
+ sfx, switch_cpu_wide->next_prev_pid,
+ switch_cpu_wide->next_prev_tid);
+
+ if (flags & ptsbp_verbose) {
+ fprintf(stream, "PERF_RECORD_SWITCH_CPU_WIDE.%s", sfx);
+ fprintf(stream, "\n %s pid: %x", pfx,
+ switch_cpu_wide->next_prev_pid);
+ fprintf(stream, "\n %s tid: %x", pfx,
+ switch_cpu_wide->next_prev_tid);
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int pt_sb_pevent_print_samples_compact(const struct pev_event *event,
+ FILE *stream)
+{
+ if (!event)
+ return -pte_internal;
+
+ fprintf(stream, " {");
+
+ if (event->sample.pid && event->sample.tid)
+ fprintf(stream, " %x/%x", *event->sample.pid,
+ *event->sample.tid);
+
+ if (event->sample.time)
+ fprintf(stream, " %" PRIx64, *event->sample.time);
+
+ if (event->sample.id)
+ fprintf(stream, " %" PRIx64, *event->sample.id);
+
+ if (event->sample.cpu)
+ fprintf(stream, " cpu-%x", *event->sample.cpu);
+
+ if (event->sample.stream_id)
+ fprintf(stream, " %" PRIx64, *event->sample.stream_id);
+
+ if (event->sample.identifier)
+ fprintf(stream, " %" PRIx64, *event->sample.identifier);
+
+ fprintf(stream, " }");
+
+ return 0;
+}
+
+static int pt_sb_pevent_print_samples_verbose(const struct pev_event *event,
+ FILE *stream)
+{
+ if (!event)
+ return -pte_internal;
+
+ if (event->sample.pid && event->sample.tid) {
+ fprintf(stream, "\n pid: %x", *event->sample.pid);
+ fprintf(stream, "\n tid: %x", *event->sample.tid);
+ }
+
+ if (event->sample.time)
+ fprintf(stream, "\n time: %" PRIx64, *event->sample.time);
+
+ if (event->sample.id)
+ fprintf(stream, "\n id: %" PRIx64, *event->sample.id);
+
+ if (event->sample.cpu)
+ fprintf(stream, "\n cpu: %x", *event->sample.cpu);
+
+ if (event->sample.stream_id)
+ fprintf(stream, "\n stream id: %" PRIx64,
+ *event->sample.stream_id);
+
+ if (event->sample.identifier)
+ fprintf(stream, "\n identifier: %" PRIx64,
+ *event->sample.identifier);
+
+ return 0;
+}
+
+static int pt_sb_pevent_print_samples(const struct pev_event *event,
+ FILE *stream, uint32_t flags)
+{
+ int errcode;
+
+ if (flags & ptsbp_compact) {
+ errcode = pt_sb_pevent_print_samples_compact(event, stream);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ if (flags & ptsbp_verbose) {
+ errcode = pt_sb_pevent_print_samples_verbose(event, stream);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int pt_sb_pevent_print(struct pt_sb_pevent_priv *priv, FILE *stream,
+ uint32_t flags)
+{
+ struct pev_event *event;
+ int errcode;
+
+ if (!priv)
+ return -pte_internal;
+
+ /* We should not be called before fetching the first record. */
+ if (!priv->current)
+ return -pte_internal;
+
+ if (!priv->filename)
+ return -pte_internal;
+
+ event = &priv->event;
+
+ /* Print filename and/or file offset before the actual record. */
+ switch (flags & (ptsbp_filename | ptsbp_file_offset)) {
+ case ptsbp_filename | ptsbp_file_offset:
+ fprintf(stream, "%s:%016" PRIx64 " ", priv->filename,
+ (uint64_t) (priv->current - priv->begin));
+ break;
+
+ case ptsbp_filename:
+ fprintf(stream, "%s ", priv->filename);
+ break;
+
+ case ptsbp_file_offset:
+ fprintf(stream, "%016" PRIx64 " ",
+ (uint64_t) (priv->current - priv->begin));
+ break;
+ }
+
+ /* Print the timestamp if requested and available. */
+ if ((flags & ptsbp_tsc) && event->sample.time)
+ fprintf(stream, "%016" PRIx64 " ", event->sample.tsc);
+
+ /* Print the actual sideband record. */
+ errcode = pt_sb_pevent_print_event(event, stream, flags);
+ if (errcode < 0)
+ return errcode;
+
+ /* Print samples if configured. */
+ if (priv->pev.sample_type) {
+ errcode = pt_sb_pevent_print_samples(event, stream, flags);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ if (flags)
+ fprintf(stream, "\n");
+
+ return 0;
+}
+
+static int pt_sb_pevent_switch_contexts(struct pt_sb_session *session,
+ struct pt_image **image,
+ struct pt_sb_pevent_priv *priv)
+{
+ struct pt_sb_context *prev, *next;
+ int errcode;
+
+ if (!priv || !image)
+ return -pte_internal;
+
+ prev = priv->context;
+ next = priv->next_context;
+ if (!next)
+ return -pte_internal;
+
+ errcode = pt_sb_ctx_switch_to(image, session, next);
+ if (errcode < 0)
+ return errcode;
+
+ priv->next_context = NULL;
+ priv->context = next;
+
+ return prev ? pt_sb_ctx_put(prev) : 0;
+}
+
+static int pt_sb_pevent_cancel_context_switch(struct pt_sb_pevent_priv *priv)
+{
+ struct pt_sb_context *context;
+
+ if (!priv)
+ return -pte_internal;
+
+ context = priv->next_context;
+ if (!context)
+ return 0;
+
+ priv->next_context = NULL;
+
+ return pt_sb_ctx_put(context);
+}
+
+static int pt_sb_pevent_prepare_context_switch(struct pt_sb_pevent_priv *priv,
+ struct pt_sb_context *context)
+{
+ int errcode;
+
+ if (!priv || !context)
+ return -pte_internal;
+
+ /* There's nothing to do if this switch is already pending.
+ *
+ * This could be the result of applying a cpu-wide switch-out followed
+ * by a cpu-wide switch-in without a chance to actually apply the
+ * context switch in-between.
+ */
+ if (priv->next_context == context)
+ return 0;
+
+ /* This context switch overwrites any previously pending switch.
+ *
+ * We may skip context switches due to imprecise timing or due to
+ * re-synchronization after an error.
+ */
+ errcode = pt_sb_pevent_cancel_context_switch(priv);
+ if (errcode < 0)
+ return errcode;
+
+ /* There's nothing to do if we're switching to the current context.
+ *
+ * This could be the result of switching between threads of the same
+ * process or of applying a cpu-wide switch-out followed by a cpu-wide
+ * switch-in.
+ */
+ if (priv->context == context)
+ return 0;
+
+ errcode = pt_sb_ctx_get(context);
+ if (errcode < 0)
+ return errcode;
+
+ priv->next_context = context;
+
+ return 0;
+}
+
+static int pt_sb_pevent_prepare_switch_to_pid(struct pt_sb_session *session,
+ struct pt_sb_pevent_priv *priv,
+ uint32_t pid)
+{
+ struct pt_sb_context *context;
+ int errcode;
+
+ context = NULL;
+ errcode = pt_sb_get_context_by_pid(&context, session, pid);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_sb_pevent_prepare_context_switch(priv, context);
+}
+
+static int pt_sb_pevent_remove_context_for_pid(struct pt_sb_session *session,
+ uint32_t pid)
+{
+ struct pt_sb_context *context;
+ int errcode;
+
+ context = NULL;
+ errcode = pt_sb_find_context_by_pid(&context, session, pid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!context)
+ return 0;
+
+ return pt_sb_remove_context(session, context);
+}
+
+static int
+pt_sb_pevent_itrace_start(struct pt_sb_session *session,
+ struct pt_image **image,
+ struct pt_sb_pevent_priv *priv,
+ const struct pev_record_itrace_start *record)
+{
+ int errcode;
+
+ if (!image || !record)
+ return -pte_internal;
+
+ errcode = pt_sb_pevent_prepare_switch_to_pid(session, priv,
+ record->pid);
+ if (errcode < 0)
+ return errcode;
+
+ /* We may have already installed the starting context. */
+ if (!priv->next_context)
+ return 0;
+
+ /* If we have not, let's not wait for a suitable event.
+ *
+ * We just started tracing so there's no reason to wait for a suitable
+ * location.
+ */
+ return pt_sb_pevent_switch_contexts(session, image, priv);
+}
+
+static int pt_sb_pevent_fork(struct pt_sb_session *session,
+ const struct pev_record_fork *record)
+{
+ struct pt_sb_context *context, *parent;
+ struct pt_image *image, *pimage;
+ uint32_t ppid, pid;
+ int errcode;
+
+ if (!record)
+ return -pte_internal;
+
+ /* If this is just creating a new thread, there's nothing to do.
+ *
+ * We should already have a context for this process. If we don't, it
+ * doesn't really help to create a new context with an empty process
+ * image at this point.
+ */
+ ppid = record->ppid;
+ pid = record->pid;
+ if (ppid == pid)
+ return 0;
+
+ /* We're creating a new process plus the initial thread.
+ *
+ * That initial thread should get the same id as the process.
+ */
+ if (pid != record->tid)
+ return -pte_internal;
+
+ /* Remove any existing context we might have for @pid.
+ *
+ * We're not removing process contexts when we get the exit event since
+ * that is sent while the process is still running inside the kernel.
+ */
+ errcode = pt_sb_pevent_remove_context_for_pid(session, pid);
+ if (errcode < 0)
+ return errcode;
+
+ /* Create a new context for this new process. */
+ context = NULL;
+ errcode = pt_sb_get_context_by_pid(&context, session, pid);
+ if (errcode < 0)
+ return errcode;
+
+ /* Let's see if we also know about the parent process. */
+ parent = NULL;
+ errcode = pt_sb_find_context_by_pid(&parent, session, ppid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!parent)
+ return 0;
+
+ /* Both parent and child must have valid images. */
+ pimage = pt_sb_ctx_image(parent);
+ image = pt_sb_ctx_image(context);
+ if (!pimage || !image)
+ return -pte_internal;
+
+ /* Initialize the child's image with its parent's. */
+ return pt_image_copy(image, pimage);
+}
+
+static int pt_sb_pevent_exec(struct pt_sb_session *session,
+ struct pt_image **image,
+ struct pt_sb_pevent_priv *priv,
+ const struct pev_record_comm *record)
+{
+ struct pt_sb_context *context;
+ uint32_t pid;
+ int errcode;
+
+ if (!record)
+ return -pte_internal;
+
+ pid = record->pid;
+
+ /* Instead of replacing a context's image, we replace the context.
+ *
+ * This allows us to keep the old image around until we actually switch.
+ * We are likely using it at the moment.
+ */
+ errcode = pt_sb_pevent_remove_context_for_pid(session, pid);
+ if (errcode < 0)
+ return errcode;
+
+ /* This creates a new context and a new image.
+ *
+ * This new image will already be initialized with kernel sections but
+ * will otherwise be empty. We will populate it later with MMAP records
+ * that follow this COMM.EXEC record.
+ */
+ context = NULL;
+ errcode = pt_sb_get_context_by_pid(&context, session, pid);
+ if (errcode < 0)
+ return errcode;
+
+ /* If we're not maintaining a decoder, we're done. */
+ if (!image)
+ return 0;
+
+ /* We replaced the previous context of @pid with @context. Let's
+ * (prepare to) switch to the new @context.
+ *
+ * The actual context switch is postponed until we're in kernel context.
+ *
+ * It is quite likely that we are currently using the previous context
+ * we removed earlier in order to reach the location where we transition
+ * into the kernel. In the trace, we have not yet exec'ed.
+ */
+ return pt_sb_pevent_prepare_context_switch(priv, context);
+}
+
+static int pt_sb_pevent_switch(struct pt_sb_session *session,
+ struct pt_sb_pevent_priv *priv,
+ const uint32_t *pid)
+{
+ if (!pid)
+ return -pte_bad_config;
+
+ return pt_sb_pevent_prepare_switch_to_pid(session, priv, *pid);
+}
+
+static int
+pt_sb_pevent_switch_cpu(struct pt_sb_session *session,
+ struct pt_sb_pevent_priv *priv,
+ const struct pev_record_switch_cpu_wide *record)
+{
+ if (!record)
+ return -pte_internal;
+
+ return pt_sb_pevent_prepare_switch_to_pid(session, priv,
+ record->next_prev_pid);
+}
+
+static int pt_sb_pevent_map(struct pt_sb_session *session,
+ const struct pt_sb_pevent_priv *priv, uint32_t pid,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t vaddr)
+{
+ struct pt_sb_context *context;
+ const char *sysroot;
+ char buffer[FILENAME_MAX];
+ int errcode;
+
+ if (!priv || !filename)
+ return -pte_internal;
+
+ /* Get the context for this process. */
+ context = NULL;
+ errcode = pt_sb_get_context_by_pid(&context, session, pid);
+ if (errcode < 0)
+ return errcode;
+
+	/* The optional system root directory. */
+ sysroot = priv->sysroot;
+
+ /* Some filenames do not represent actual files on disk. We handle
+ * some of those and ignore the rest.
+ */
+ if (filename[0] == '[') {
+ /* The [vdso] file represents the vdso that is mapped into
+ * every process.
+ *
+ * We expect the user to provide all necessary vdso flavors.
+ */
+ if (strcmp(filename, "[vdso]") == 0) {
+ errcode = pt_sb_pevent_find_vdso(&filename, priv,
+ context);
+ if (errcode != 0)
+ return pt_sb_pevent_error(session, errcode,
+ priv);
+ } else
+ return pt_sb_pevent_error(session, ptse_section_lost,
+ priv);
+
+
+ } else if (strcmp(filename, "//anon") == 0) {
+ /* Those are anonymous mappings that are, for example, used by
+		 * JIT compilers to generate code that is later executed.
+ *
+ * There is no general support for this in perf and JIT enabling
+ * is work-in-progress.
+ *
+ * We will likely fail with -pte_nomap later on.
+ */
+ return pt_sb_pevent_error(session, ptse_section_lost, priv);
+
+ } else if (strstr(filename, " (deleted)")) {
+		/* The file that was mapped has meanwhile been deleted.
+ *
+ * We will likely fail with -pte_nomap later on.
+ */
+ return pt_sb_pevent_error(session, ptse_section_lost, priv);
+
+ } else if (sysroot) {
+ /* Prepend the sysroot to normal files. */
+ errcode = snprintf(buffer, sizeof(buffer), "%s%s", sysroot,
+ filename);
+ if (errcode < 0)
+ return -pte_overflow;
+
+ filename = buffer;
+ }
+
+ errcode = pt_sb_pevent_track_abi(context, filename);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_sb_ctx_mmap(session, context, filename, offset, size, vaddr);
+}
+
+static int pt_sb_pevent_mmap(struct pt_sb_session *session,
+ const struct pt_sb_pevent_priv *priv,
+ const struct pev_record_mmap *record)
+{
+ if (!record)
+ return -pte_internal;
+
+ return pt_sb_pevent_map(session, priv, record->pid, record->filename,
+ record->pgoff, record->len, record->addr);
+}
+
+static int pt_sb_pevent_mmap2(struct pt_sb_session *session,
+ const struct pt_sb_pevent_priv *priv,
+ const struct pev_record_mmap2 *record)
+{
+ if (!record)
+ return -pte_internal;
+
+ return pt_sb_pevent_map(session, priv, record->pid, record->filename,
+ record->pgoff, record->len, record->addr);
+}
+
+static int pt_sb_pevent_aux(const struct pt_sb_session *session,
+ const struct pt_sb_pevent_priv *priv,
+ const struct pev_record_aux *record)
+{
+ if (!record)
+ return -pte_internal;
+
+ if (record->flags & PERF_AUX_FLAG_TRUNCATED)
+ return pt_sb_pevent_error(session, ptse_trace_lost, priv);
+
+ return 0;
+}
+
+static int pt_sb_pevent_ignore_mmap(uint16_t misc)
+{
+ /* We rely on the kernel core file for ring-0 decode.
+ *
+ * Both kernel and kernel modules are modified during boot and insmod
+ * respectively. We can't decode from the respective files on disk.
+ *
+ * Ignore kernel MMAP events so we don't overwrite useful data from
+ * kcore with useless data from binary files.
+ */
+ switch (misc & PERF_RECORD_MISC_CPUMODE_MASK) {
+ case PERF_RECORD_MISC_KERNEL:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+static int pt_sb_pevent_apply_event_record(struct pt_sb_session *session,
+ struct pt_image **image,
+ struct pt_sb_pevent_priv *priv,
+ const struct pev_event *event)
+{
+ if (!event)
+ return -pte_internal;
+
+ switch (event->type) {
+ default:
+ /* Ignore unknown events. */
+ break;
+
+ case PERF_RECORD_ITRACE_START:
+ /* Ignore trace starts from secondary sideband channels. */
+ if (!image)
+ break;
+
+ return pt_sb_pevent_itrace_start(session, image, priv,
+ event->record.itrace_start);
+
+ case PERF_RECORD_FORK:
+ return pt_sb_pevent_fork(session, event->record.fork);
+
+ case PERF_RECORD_COMM:
+ /* We're only interested in COMM.EXEC events. */
+ if (!(event->misc & PERF_RECORD_MISC_COMM_EXEC))
+ break;
+
+ return pt_sb_pevent_exec(session, image, priv,
+ event->record.comm);
+
+ case PERF_RECORD_SWITCH:
+ /* Ignore context switches from secondary sideband channels. */
+ if (!image)
+ break;
+
+ /* Ignore switch-out events. We wait for the switch-in. */
+ if (event->misc & PERF_RECORD_MISC_SWITCH_OUT)
+ break;
+
+ return pt_sb_pevent_switch(session, priv, event->sample.pid);
+
+ case PERF_RECORD_SWITCH_CPU_WIDE:
+ /* Ignore context switches from secondary sideband channels. */
+ if (!image)
+ break;
+
+ /* For switch-in events, we use the pid sample, if available.
+ *
+ * For cpu-wide switch events, not sampling pid is acceptable
+		 * since we get the pid in @next_prev_pid of a switch-out event.
+ *
+ * We will use a cpu-wide switch-in event, if possible, but we
+ * should be able to do without most of the time.
+ */
+ if (!(event->misc & PERF_RECORD_MISC_SWITCH_OUT)) {
+ if (!event->sample.pid)
+ break;
+
+ return pt_sb_pevent_switch(session, priv,
+ event->sample.pid);
+ }
+
+ return pt_sb_pevent_switch_cpu(session, priv,
+ event->record.switch_cpu_wide);
+
+ case PERF_RECORD_MMAP:
+ /* We intentionally ignore some MMAP records. */
+ if (pt_sb_pevent_ignore_mmap(event->misc))
+ break;
+
+ return pt_sb_pevent_mmap(session, priv, event->record.mmap);
+
+ case PERF_RECORD_MMAP2:
+ /* We intentionally ignore some MMAP records. */
+ if (pt_sb_pevent_ignore_mmap(event->misc))
+ break;
+
+ return pt_sb_pevent_mmap2(session, priv, event->record.mmap2);
+
+ case PERF_RECORD_LOST:
+ /* Warn about losses.
+ *
+ * We put the warning into the output. It is quite likely that
+ * we will run into a decode error shortly after (or ran into it
+ * already); this warning may help explain it.
+ */
+ return pt_sb_pevent_error(session, ptse_lost, priv);
+
+ case PERF_RECORD_AUX:
+ /* Ignore trace losses from secondary sideband channels. */
+ if (!image)
+ break;
+
+ return pt_sb_pevent_aux(session, priv, event->record.aux);
+ }
+
+ return 0;
+}
+
+static int ploc_from_ip(enum pt_sb_pevent_loc *loc,
+ const struct pt_sb_pevent_priv *priv, uint64_t ip)
+{
+ if (!loc || !priv)
+ return -pte_internal;
+
+ *loc = (ip < priv->kernel_start) ? ploc_in_user : ploc_in_kernel;
+
+ return 0;
+}
+
+static int ploc_from_suppressed_ip(enum pt_sb_pevent_loc *loc,
+ enum pt_sb_pevent_loc from)
+{
+ if (!loc)
+ return -pte_internal;
+
+ switch (from) {
+ default:
+ *loc = ploc_unknown;
+ break;
+
+ case ploc_likely_in_kernel:
+ case ploc_in_kernel:
+ *loc = ploc_likely_in_user;
+ break;
+
+ case ploc_likely_in_user:
+ case ploc_in_user:
+ *loc = ploc_likely_in_kernel;
+ break;
+ }
+
+ return 0;
+}
+
+static int ploc_from_event(enum pt_sb_pevent_loc *loc,
+ const struct pt_sb_pevent_priv *priv,
+ const struct pt_event *event)
+{
+ if (!loc || !priv || !event)
+ return -pte_internal;
+
+ switch (event->type) {
+ default:
+ break;
+
+ case ptev_enabled:
+ return ploc_from_ip(loc, priv, event->variant.enabled.ip);
+
+ case ptev_disabled:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.disabled.ip);
+
+ return ploc_from_suppressed_ip(loc, priv->location);
+
+ case ptev_async_disabled: {
+ enum pt_sb_pevent_loc fromloc;
+ int errcode;
+
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.async_disabled.ip);
+
+ errcode = ploc_from_ip(&fromloc, priv,
+ event->variant.async_disabled.at);
+ if (errcode < 0)
+ return errcode;
+
+ return ploc_from_suppressed_ip(loc, fromloc);
+ }
+
+ case ptev_async_branch:
+ return ploc_from_ip(loc, priv, event->variant.async_branch.to);
+
+ case ptev_async_paging:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.async_paging.ip);
+
+ fallthrough;
+ case ptev_paging:
+ *loc = ploc_likely_in_kernel;
+ return 0;
+
+ case ptev_overflow:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.overflow.ip);
+
+ break;
+
+ case ptev_exec_mode:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.exec_mode.ip);
+
+ break;
+
+ case ptev_tsx:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.tsx.ip);
+
+ break;
+
+ case ptev_exstop:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.exstop.ip);
+
+ break;
+
+ case ptev_mwait:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.mwait.ip);
+
+ break;
+
+ case ptev_ptwrite:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.ptwrite.ip);
+
+ break;
+
+ case ptev_tick:
+ if (!event->ip_suppressed)
+ return ploc_from_ip(loc, priv,
+ event->variant.tick.ip);
+
+ break;
+ }
+
+ *loc = ploc_unknown;
+ return 0;
+}
+
+static int pt_sb_pevent_apply(struct pt_sb_session *session,
+ struct pt_image **image,
+ const struct pt_event *event,
+ struct pt_sb_pevent_priv *priv)
+{
+ const struct pev_event *record;
+ enum pt_sb_pevent_loc oldloc;
+ int errcode;
+
+ if (!priv || !event)
+ return -pte_internal;
+
+ /* If the current perf event record is due, apply it.
+ *
+ * We don't need to look at the actual event that provided the
+ * timestamp. It suffices to know that time moved beyond the current
+ * perf event record.
+ *
+ * It is tempting to postpone applying the record until a suitable event
+ * but we need to ensure that records from different channels are
+ * applied in timestamp order.
+ *
+ * So we apply the record solely based on timestamps and postpone its
+ * effect until a suitable event.
+ *
+ * The last record in the trace won't be overridden and we have to take
+	 * care not to apply it twice. We need to keep it until we have been
+	 * able to place the last pending context switch.
+ */
+ record = &priv->event;
+ if ((priv->current != priv->next) &&
+ (!record->sample.time || (record->sample.tsc <= event->tsc)))
+ return pt_sb_pevent_apply_event_record(session, image, priv,
+ record);
+
+ /* We first apply all our sideband records one-by-one until we're in
+ * sync with the event.
+ *
+ * When we get here, we applied all previous sideband records. Let's
+ * use the event to keep track of kernel vs user space and apply any
+ * postponed context switches.
+ *
+ * We preserve the previous location to detect returns from kernel to
+ * user space.
+ */
+ oldloc = priv->location;
+ errcode = ploc_from_event(&priv->location, priv, event);
+ if (errcode < 0)
+ return errcode;
+
+ /* We postpone context switches until we reach a suitable location in
+ * the trace. If we don't have a context switch pending, we're done.
+ */
+ if (!priv->next_context) {
+ /* Signal the end of the trace if the last event did not result
+ * in a postponed context switch or if that context switch had
+ * been applied at a previous event.
+ */
+ if (priv->current == priv->next)
+ return -pte_eos;
+
+ return 0;
+ }
+
+ /* Apply a postponed context switch inside kernel mode.
+ *
+ * For our purposes it does not matter where exactly we are in the
+ * kernel.
+ *
+ * In order to catch the first event window right before a tracing
+ * enabled event after some time of tracing being disabled (or at the
+ * beginning of the trace), we also accept an unknown location.
+ */
+ switch (oldloc) {
+ case ploc_likely_in_kernel:
+ case ploc_in_kernel:
+ case ploc_unknown:
+ return pt_sb_pevent_switch_contexts(session, image, priv);
+
+ default:
+ switch (priv->location) {
+ case ploc_likely_in_kernel:
+ case ploc_in_kernel:
+ case ploc_unknown:
+ return pt_sb_pevent_switch_contexts(session, image,
+ priv);
+
+ default:
+ break;
+ }
+
+ break;
+ };
+
+ return 0;
+}
+
+static int pt_sb_pevent_fetch_callback(struct pt_sb_session *session,
+ uint64_t *tsc, void *priv)
+{
+ int errcode;
+
+ errcode = pt_sb_pevent_fetch(tsc, (struct pt_sb_pevent_priv *) priv);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ pt_sb_pevent_error(session, errcode,
+ (struct pt_sb_pevent_priv *) priv);
+
+ return errcode;
+}
+
+static int pt_sb_pevent_print_callback(struct pt_sb_session *session,
+ FILE *stream, uint32_t flags, void *priv)
+{
+ int errcode;
+
+ errcode = pt_sb_pevent_print((struct pt_sb_pevent_priv *) priv, stream,
+ flags);
+ if (errcode < 0)
+ return pt_sb_pevent_error(session, errcode,
+ (struct pt_sb_pevent_priv *) priv);
+
+ return 0;
+}
+
+static int pt_sb_pevent_apply_callback(struct pt_sb_session *session,
+ struct pt_image **image,
+ const struct pt_event *event, void *priv)
+{
+ int errcode;
+
+ errcode = pt_sb_pevent_apply(session, image, event,
+ (struct pt_sb_pevent_priv *) priv);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return pt_sb_pevent_error(session, errcode,
+ (struct pt_sb_pevent_priv *) priv);
+
+ return errcode;
+}
+
+int pt_sb_alloc_pevent_decoder(struct pt_sb_session *session,
+ const struct pt_sb_pevent_config *pev)
+{
+ struct pt_sb_decoder_config config;
+ struct pt_sb_pevent_priv *priv;
+ int errcode;
+
+ if (!session || !pev)
+ return -pte_invalid;
+
+ priv = malloc(sizeof(*priv));
+ if (!priv)
+ return -pte_nomem;
+
+ errcode = pt_sb_pevent_init(priv, pev);
+ if (errcode < 0) {
+ free(priv);
+ return errcode;
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.fetch = pt_sb_pevent_fetch_callback;
+ config.apply = pt_sb_pevent_apply_callback;
+ config.print = pt_sb_pevent_print_callback;
+ config.dtor = pt_sb_pevent_dtor;
+ config.priv = priv;
+ config.primary = pev->primary;
+
+ errcode = pt_sb_alloc_decoder(session, &config);
+ if (errcode < 0)
+ free(priv);
+
+ return errcode;
+}
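Putting the pieces together, here is a minimal sketch of wiring a perf event sideband channel into a session; the image section cache comes from the caller's libipt setup, the filename and kernel start address are made-up example values, and fetching and applying records via pt_sb_init_decoders() and the session's event flow is not shown:

static struct pt_sb_session *
session_example(struct pt_image_section_cache *iscache)
{
	struct pt_sb_pevent_config config;
	struct pt_sb_session *session;
	int errcode;

	session = pt_sb_alloc(iscache);
	if (!session)
		return NULL;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.filename = "perf.data-sideband-cpu0";
	config.primary = 1;	/* this channel maintains the decoder's image */
	config.kernel_start = 0xffffffff81000000ull;

	errcode = pt_sb_alloc_pevent_decoder(session, &config);
	if (errcode < 0) {
		pt_sb_free(session);
		return NULL;
	}

	return session;
}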
+
+#endif /* FEATURE_PEVENT */
diff --git a/sideband/src/pt_sb_session.c b/sideband/src/pt_sb_session.c
new file mode 100644
index 000000000000..9c2e644fb698
--- /dev/null
+++ b/sideband/src/pt_sb_session.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_sb_session.h"
+#include "pt_sb_context.h"
+#include "pt_sb_decoder.h"
+
+#include "libipt-sb.h"
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+#if defined(_MSC_VER) && (_MSC_VER < 1900)
+# define snprintf _snprintf_c
+#endif
+
+
+struct pt_sb_session *pt_sb_alloc(struct pt_image_section_cache *iscache)
+{
+ struct pt_sb_session *session;
+ struct pt_image *kernel;
+
+ kernel = pt_image_alloc("kernel");
+ if (!kernel)
+ return NULL;
+
+ session = malloc(sizeof(*session));
+ if (!session) {
+ pt_image_free(kernel);
+ return NULL;
+ }
+
+ memset(session, 0, sizeof(*session));
+ session->iscache = iscache;
+ session->kernel = kernel;
+
+ return session;
+}
+
+static void pt_sb_free_decoder(struct pt_sb_decoder *decoder)
+{
+ void (*dtor)(void *);
+
+ if (!decoder)
+ return;
+
+ dtor = decoder->dtor;
+ if (dtor)
+ dtor(decoder->priv);
+
+ free(decoder);
+}
+
+static void pt_sb_free_decoder_list(struct pt_sb_decoder *list)
+{
+ while (list) {
+ struct pt_sb_decoder *trash;
+
+ trash = list;
+ list = trash->next;
+
+ pt_sb_free_decoder(trash);
+ }
+}
+
+void pt_sb_free(struct pt_sb_session *session)
+{
+ struct pt_sb_context *context;
+
+ if (!session)
+ return;
+
+ pt_sb_free_decoder_list(session->decoders);
+ pt_sb_free_decoder_list(session->waiting);
+ pt_sb_free_decoder_list(session->retired);
+ pt_sb_free_decoder_list(session->removed);
+
+ context = session->contexts;
+ while (context) {
+ struct pt_sb_context *trash;
+
+ trash = context;
+ context = trash->next;
+
+ (void) pt_sb_ctx_put(trash);
+ }
+
+ pt_image_free(session->kernel);
+
+ free(session);
+}
+
+struct pt_image_section_cache *pt_sb_iscache(struct pt_sb_session *session)
+{
+ if (!session)
+ return NULL;
+
+ return session->iscache;
+}
+
+struct pt_image *pt_sb_kernel_image(struct pt_sb_session *session)
+{
+ if (!session)
+ return NULL;
+
+ return session->kernel;
+}
+
+static int pt_sb_add_context_by_pid(struct pt_sb_context **pcontext,
+ struct pt_sb_session *session, uint32_t pid)
+{
+ struct pt_sb_context *context;
+ struct pt_image *kernel;
+ char iname[16];
+ int errcode;
+
+ if (!pcontext || !session)
+ return -pte_invalid;
+
+ kernel = pt_sb_kernel_image(session);
+ if (!kernel)
+ return -pte_internal;
+
+ memset(iname, 0, sizeof(iname));
+ (void) snprintf(iname, sizeof(iname), "pid-%x", pid);
+
+ context = pt_sb_ctx_alloc(iname);
+ if (!context)
+ return -pte_nomem;
+
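+ /* Start the new process image out as a copy of the kernel image so
+  * kernel-mode code remains decodable in this context.
+  */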
+ errcode = pt_image_copy(context->image, kernel);
+ if (errcode < 0) {
+ (void) pt_sb_ctx_put(context);
+ return errcode;
+ }
+
+ context->next = session->contexts;
+ context->pid = pid;
+
+ session->contexts = context;
+ *pcontext = context;
+
+ return 0;
+}
+
+int pt_sb_get_context_by_pid(struct pt_sb_context **context,
+ struct pt_sb_session *session, uint32_t pid)
+{
+ int status;
+
+ if (!context || !session)
+ return -pte_invalid;
+
+ status = pt_sb_find_context_by_pid(context, session, pid);
+ if (status < 0)
+ return status;
+
+ if (*context)
+ return 0;
+
+ return pt_sb_add_context_by_pid(context, session, pid);
+}
+
+int pt_sb_find_context_by_pid(struct pt_sb_context **pcontext,
+ struct pt_sb_session *session, uint32_t pid)
+{
+ struct pt_sb_context *ctx;
+
+ if (!pcontext || !session)
+ return -pte_invalid;
+
+ for (ctx = session->contexts; ctx; ctx = ctx->next) {
+ if (ctx->pid == pid)
+ break;
+ }
+
+ *pcontext = ctx;
+
+ return 0;
+}
+
+int pt_sb_remove_context(struct pt_sb_session *session,
+ struct pt_sb_context *context)
+{
+ struct pt_sb_context **pnext, *ctx;
+
+ if (!session || !context)
+ return -pte_invalid;
+
+ pnext = &session->contexts;
+ for (ctx = *pnext; ctx; pnext = &ctx->next, ctx = *pnext) {
+ if (ctx == context)
+ break;
+ }
+
+ if (!ctx)
+ return -pte_nosync;
+
+ *pnext = ctx->next;
+
+ return pt_sb_ctx_put(ctx);
+}
+
+int pt_sb_alloc_decoder(struct pt_sb_session *session,
+ const struct pt_sb_decoder_config *config)
+{
+ struct pt_sb_decoder *decoder;
+
+ if (!session || !config)
+ return -pte_invalid;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return -pte_nomem;
+
+ memset(decoder, 0, sizeof(*decoder));
+ decoder->next = session->waiting;
+ decoder->fetch = config->fetch;
+ decoder->apply = config->apply;
+ decoder->print = config->print;
+ decoder->dtor = config->dtor;
+ decoder->priv = config->priv;
+ decoder->primary = config->primary;
+
+ session->waiting = decoder;
+
+ return 0;
+}
+
+/* Add a new decoder to a list of decoders.
+ *
+ * Decoders in @list are ordered by their @tsc (ascending) and @list does not
+ * contain @decoder. Find the right place for @decoder and add it to @list.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_sb_add_decoder(struct pt_sb_decoder **list,
+ struct pt_sb_decoder *decoder)
+{
+ struct pt_sb_decoder *cand;
+ uint64_t tsc;
+
+ if (!list || !decoder || decoder->next)
+ return -pte_internal;
+
+ tsc = decoder->tsc;
+ for (cand = *list; cand; list = &cand->next, cand = *list) {
+ if (tsc <= cand->tsc)
+ break;
+ }
+
+ decoder->next = cand;
+ *list = decoder;
+
+ return 0;
+}
+
+static int pt_sb_fetch(struct pt_sb_session *session,
+ struct pt_sb_decoder *decoder)
+{
+ int (*fetch)(struct pt_sb_session *, uint64_t *, void *);
+
+ if (!decoder)
+ return -pte_internal;
+
+ fetch = decoder->fetch;
+ if (!fetch)
+ return -pte_bad_config;
+
+ return fetch(session, &decoder->tsc, decoder->priv);
+}
+
+static int pt_sb_print(struct pt_sb_session *session,
+ struct pt_sb_decoder *decoder, FILE *stream,
+ uint32_t flags)
+{
+ int (*print)(struct pt_sb_session *, FILE *, uint32_t, void *);
+
+ if (!decoder)
+ return -pte_internal;
+
+ print = decoder->print;
+ if (!print)
+ return -pte_bad_config;
+
+ return print(session, stream, flags, decoder->priv);
+}
+
+static int pt_sb_apply(struct pt_sb_session *session, struct pt_image **image,
+ struct pt_sb_decoder *decoder,
+ const struct pt_event *event)
+{
+ int (*apply)(struct pt_sb_session *, struct pt_image **,
+ const struct pt_event *, void *);
+
+ if (!decoder || !event)
+ return -pte_internal;
+
+ apply = decoder->apply;
+ if (!apply)
+ return -pte_bad_config;
+
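+ /* Only the primary decoder may modify the tracing image.  Secondary
+  * decoders are passed a NULL image pointer.
+  */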
+ if (!decoder->primary)
+ image = NULL;
+
+ return apply(session, image, event, decoder->priv);
+}
+
+int pt_sb_init_decoders(struct pt_sb_session *session)
+{
+ struct pt_sb_decoder *decoder;
+
+ if (!session)
+ return -pte_invalid;
+
+ decoder = session->waiting;
+ while (decoder) {
+ int errcode;
+
+ session->waiting = decoder->next;
+ decoder->next = NULL;
+
+ errcode = pt_sb_fetch(session, decoder);
+ if (errcode < 0) {
+ /* Fetch errors remove @decoder. In this case, they
+ * prevent it from being added in the first place.
+ */
+ pt_sb_free_decoder(decoder);
+ } else {
+ errcode = pt_sb_add_decoder(&session->decoders,
+ decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ decoder = session->waiting;
+ }
+
+ return 0;
+}
+
+/* Copy an event provided by an unknown version of libipt.
+ *
+ * Copy at most @size bytes of @uevent into @event and zero-initialize any
+ * additional bytes in @event not covered by @uevent.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_sb_event_from_user(struct pt_event *event,
+ const struct pt_event *uevent, size_t size)
+{
+ if (!event || !uevent)
+ return -pte_internal;
+
+ if (size < offsetof(struct pt_event, reserved))
+ return -pte_invalid;
+
+ /* Ignore fields in the user's event we don't know; zero out fields the
+ * user didn't know about.
+ */
+ if (sizeof(*event) < size)
+ size = sizeof(*event);
+ else
+ memset(((uint8_t *) event) + size, 0, sizeof(*event) - size);
+
+ /* Copy (portions of) the user's event. */
+ memcpy(event, uevent, size);
+
+ return 0;
+}
+
+static int pt_sb_event_present(struct pt_sb_session *session,
+ struct pt_image **image,
+ struct pt_sb_decoder **pnext,
+ const struct pt_event *event)
+{
+ struct pt_sb_decoder *decoder;
+ int errcode;
+
+ if (!session || !pnext)
+ return -pte_internal;
+
+ decoder = *pnext;
+ while (decoder) {
+ errcode = pt_sb_apply(session, image, decoder, event);
+ if (errcode < 0) {
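+ /* Unlink the failing decoder and park it on the removed list;
+  * its destructor is called when the session is freed.
+  */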
+ struct pt_sb_decoder *trash;
+
+ trash = decoder;
+ decoder = trash->next;
+ *pnext = decoder;
+
+ trash->next = session->removed;
+ session->removed = trash;
+ continue;
+ }
+
+ pnext = &decoder->next;
+ decoder = *pnext;
+ }
+
+ return 0;
+}
+
+int pt_sb_event(struct pt_sb_session *session, struct pt_image **image,
+ const struct pt_event *uevent, size_t size, FILE *stream,
+ uint32_t flags)
+{
+ struct pt_sb_decoder *decoder;
+ struct pt_event event;
+ int errcode;
+
+ if (!session || !uevent)
+ return -pte_invalid;
+
+ errcode = pt_sb_event_from_user(&event, uevent, size);
+ if (errcode < 0)
+ return errcode;
+
+ /* In the initial round, we present the event to all decoders with
+ * records for a smaller or equal timestamp.
+ *
+ * We only need to look at the first decoder. We remove it from the
+ * list and ask it to apply the event. Then, we ask it to fetch the
+ * next record and re-add it to the list according to that next record's
+ * timestamp.
+ */
+ for (;;) {
+ decoder = session->decoders;
+ if (!decoder)
+ break;
+
+ /* We don't check @event.has_tsc to support sideband
+ * correlation based on relative (non-wall clock) time.
+ */
+ if (event.tsc < decoder->tsc)
+ break;
+
+ session->decoders = decoder->next;
+ decoder->next = NULL;
+
+ if (stream) {
+ errcode = pt_sb_print(session, decoder, stream, flags);
+ if (errcode < 0) {
+ decoder->next = session->removed;
+ session->removed = decoder;
+ continue;
+ }
+ }
+
+ errcode = pt_sb_apply(session, image, decoder, &event);
+ if (errcode < 0) {
+ decoder->next = session->removed;
+ session->removed = decoder;
+ continue;
+ }
+
+ errcode = pt_sb_fetch(session, decoder);
+ if (errcode < 0) {
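+ /* A decoder that ran out of sideband records is retired; it
+  * will still be presented events in the second round below.
+  * Any other fetch error removes the decoder.
+  */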
+ if (errcode == -pte_eos) {
+ decoder->next = session->retired;
+ session->retired = decoder;
+ } else {
+ decoder->next = session->removed;
+ session->removed = decoder;
+ }
+
+ continue;
+ }
+
+ errcode = pt_sb_add_decoder(&session->decoders, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ /* In the second round, we present the event to all decoders.
+ *
+ * This allows decoders to postpone actions until an appropriate event,
+ * e.g. entry into or exit from the kernel.
+ */
+ errcode = pt_sb_event_present(session, image, &session->decoders,
+ &event);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_sb_event_present(session, image, &session->retired, &event);
+}
+
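+/* Print all sideband records with a timestamp smaller than or equal to @tsc
+ * and fetch the next record for each decoder that printed.
+ *
+ * Decoders whose print or fetch fails are moved to the removed list.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */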
+int pt_sb_dump(struct pt_sb_session *session, FILE *stream, uint32_t flags,
+ uint64_t tsc)
+{
+ struct pt_sb_decoder *decoder;
+ int errcode;
+
+ if (!session || !stream)
+ return -pte_invalid;
+
+ for (;;) {
+ decoder = session->decoders;
+ if (!decoder)
+ break;
+
+ if (tsc < decoder->tsc)
+ break;
+
+ session->decoders = decoder->next;
+ decoder->next = NULL;
+
+ errcode = pt_sb_print(session, decoder, stream, flags);
+ if (errcode < 0) {
+ decoder->next = session->removed;
+ session->removed = decoder;
+ continue;
+ }
+
+ errcode = pt_sb_fetch(session, decoder);
+ if (errcode < 0) {
+ decoder->next = session->removed;
+ session->removed = decoder;
+ continue;
+ }
+
+ errcode = pt_sb_add_decoder(&session->decoders, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+pt_sb_ctx_switch_notifier_t *
+pt_sb_notify_switch(struct pt_sb_session *session,
+ pt_sb_ctx_switch_notifier_t *notifier, void *priv)
+{
+ pt_sb_ctx_switch_notifier_t *old;
+
+ if (!session)
+ return NULL;
+
+ old = session->notify_switch_to;
+
+ session->notify_switch_to = notifier;
+ session->priv_switch_to = priv;
+
+ return old;
+}
+
+pt_sb_error_notifier_t *
+pt_sb_notify_error(struct pt_sb_session *session,
+ pt_sb_error_notifier_t *notifier, void *priv)
+{
+ pt_sb_error_notifier_t *old;
+
+ if (!session)
+ return NULL;
+
+ old = session->notify_error;
+
+ session->notify_error = notifier;
+ session->priv_error = priv;
+
+ return old;
+}
+
+int pt_sb_error(const struct pt_sb_session *session, int errcode,
+ const char *filename, uint64_t offset)
+{
+ pt_sb_error_notifier_t *notifier;
+
+ if (!session)
+ return -pte_internal;
+
+ notifier = session->notify_error;
+ if (!notifier)
+ return 0;
+
+ return notifier(errcode, filename, offset, session->priv_error);
+}
+
+const char *pt_sb_errstr(enum pt_sb_error_code errcode)
+{
+ switch (errcode) {
+ case ptse_ok:
+ return "OK";
+
+ case ptse_lost:
+ return "sideband lost";
+
+ case ptse_trace_lost:
+ return "trace lost";
+
+ case ptse_section_lost:
+ return "image section lost";
+ }
+
+ return "bad errcode";
+}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
new file mode 100644
index 000000000000..45cebfb97a0d
--- /dev/null
+++ b/test/CMakeLists.txt
@@ -0,0 +1,63 @@
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
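+# Add ctest entries for the .ptt test <name> in src/.
+#
+# Each test is added twice: insn-<name> runs ptxed with its instruction flow
+# decoder and block-<name> runs ptxed with its block decoder.  Both are driven
+# by script/test.bash using pttc, ptdump, and ptxed.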
+function(add_ptt_test name)
+ set(pttc $<TARGET_FILE:pttc>)
+ set(ptdump $<TARGET_FILE:ptdump>)
+ set(ptxed $<TARGET_FILE:ptxed>)
+ set(script ${BASH} ${CMAKE_SOURCE_DIR}/script/test.bash)
+ set(test ${CMAKE_CURRENT_SOURCE_DIR}/src/${name})
+
+ add_test(
+ NAME insn-${name}
+ COMMAND ${script} -f -g ${pttc} -d ${ptdump} -x ${ptxed} -X --insn-decoder ${test}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ptt-insn
+ )
+
+ add_test(
+ NAME block-${name}
+ COMMAND ${script} -f -g ${pttc} -d ${ptdump} -x ${ptxed} -X --block-decoder ${test}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/ptt-block
+ )
+endfunction(add_ptt_test)
+
+file(GLOB TESTS
+ LIST_DIRECTORIES false
+ RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}/src/
+ src/*.ptt
+)
+
+file(MAKE_DIRECTORY
+ ${CMAKE_CURRENT_BINARY_DIR}/ptt-insn
+ ${CMAKE_CURRENT_BINARY_DIR}/ptt-block
+)
+foreach (test ${TESTS})
+ add_ptt_test(${test})
+endforeach ()
+
+if (SIDEBAND AND PEVENT)
+ add_subdirectory(pevent)
+endif (SIDEBAND AND PEVENT)
diff --git a/test/pevent/CMakeLists.txt b/test/pevent/CMakeLists.txt
new file mode 100644
index 000000000000..77f45ad65d7c
--- /dev/null
+++ b/test/pevent/CMakeLists.txt
@@ -0,0 +1,40 @@
+# Copyright (c) 2015-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
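+# The perf_event sideband tests in this directory reuse add_ptt_test() from
+# test/CMakeLists.txt; they are only added when SIDEBAND and PEVENT are
+# enabled.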
+file(GLOB TESTS
+ LIST_DIRECTORIES false
+ RELATIVE ${CMAKE_CURRENT_SOURCE_DIR}/src/
+ src/*.ptt
+)
+
+file(MAKE_DIRECTORY
+ ${CMAKE_CURRENT_BINARY_DIR}/ptt-insn
+ ${CMAKE_CURRENT_BINARY_DIR}/ptt-block
+)
+
+foreach (test ${TESTS})
+ add_ptt_test(${test})
+endforeach ()
diff --git a/test/pevent/src/pevent-comm_exec-mmap-tsc-iret.ptt b/test/pevent/src/pevent-comm_exec-mmap-tsc-iret.ptt
new file mode 100644
index 000000000000..53a88f5de50d
--- /dev/null
+++ b/test/pevent/src/pevent-comm_exec-mmap-tsc-iret.ptt
@@ -0,0 +1,99 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test COMM.EXEC in perf_event sideband.
+;
+; Variant: all-ring tracing
+; perfect timing information
+; no context switch between COMM.EXEC and return to userland
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --event:tick --sb:compact --sb:offset --pevent:kernel-start 0xffffffff80000000
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(kernel, 1, 1, 0x0)
+; @sb s1: pevent-itrace-start(1, 1, 0x0)
+
+section kernel vstart=0xffffffff80000000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @sb s2: pevent-comm.exec(1, 1, "foo", 0x2)
+; @sb s3: pevent-mmap-section(kernel, 1, 1, 0x2)
+; @sb s4: pevent-mmap-section(text, 1, 1, 0x3)
+
+; @pt p5: tsc(0x4)
+; @pt p6: tip(3: %l3)
+l1: iret
+l2: hlt
+
+section text vstart=0x1000 start=0x1010
+l3: nop
+
+; @pt p7: fup(1: %l4)
+; @pt p8: tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, ffffffff80000000, 3, 0, pevent-comm_exec-mmap-tsc-iret.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0s2 PERF_RECORD_COMM.EXEC 1/1, "foo" { 2 }
+;%0s3 PERF_RECORD_MMAP 1/1, ffffffff80000000, 3, 0, pevent-comm_exec-mmap-tsc-iret.bin { 2 }
+;%0s4 PERF_RECORD_MMAP 1/1, 1000, 3, 10, pevent-comm_exec-mmap-tsc-iret.bin { 3 }
+;%0p5 tsc 4
+;%0p6 tip 3: %?l3
+;%0p7 fup 1: %?l4.2
+;%0p8 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, ffffffff80000000, 3, 0, pevent-comm_exec-mmap-tsc-iret.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # iret
+;[tick]
+;%0s2 PERF_RECORD_COMM.EXEC 1/1, "foo" { 2 }
+;%0s3 PERF_RECORD_MMAP 1/1, ffffffff80000000, 3, 0, pevent-comm_exec-mmap-tsc-iret.bin { 2 }
+;%0s4 PERF_RECORD_MMAP 1/1, 1000, 3, 10, pevent-comm_exec-mmap-tsc-iret.bin { 3 }
+;%0l3 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-dump.ptt b/test/pevent/src/pevent-dump.ptt
new file mode 100644
index 000000000000..a174466bcd44
--- /dev/null
+++ b/test/pevent/src/pevent-dump.ptt
@@ -0,0 +1,83 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test dumping all supported perf event packets.
+;
+; Variant: compact format
+;
+; opt:ptdump --sb:compact --sb:offset --sb:time
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(tid, time, id, stream, cpu, identifier)
+
+; @pt p0: psb()
+
+; @sb s0: pevent-mmap-section(text, 1, 1, 1, 1, 0, 1, 2, 3, 4)
+; @sb s1: pevent-lost(3, 8, 1, 2, 0, 3, 4, 5, 6)
+; @sb s2: pevent-comm(1, 1, foo, 1, 1, 0, 2, 3, 4, 5)
+; @sb s3: pevent-comm.exec(2, 2, bar, 2, 2, 0, 3, 4, 5, 6)
+; @sb s4: pevent-exit(2, 1, 2, 1, 0, 2, 2, 0, 3, 4, 5, 6)
+; @sb s5: pevent-fork(2, 1, 2, 1, 0, 2, 2, 0, 3, 4, 5, 6)
+; @sb s6: pevent-aux(0xa000, 0x1000, 0, 2, 2, 0, 3, 4, 5, 6)
+; @sb s7: pevent-aux(0xb000, 0x700, 1, 2, 2, 0, 3, 4, 5, 6)
+; @sb s8: pevent-itrace-start(1, 2, 1, 2, 2, 3, 4, 5, 6)
+
+; @pt p1: tsc(2)
+; @pt p2: psbend()
+
+; @sb s9: pevent-lost-samples(8, 1, 2, 3, 4, 5, 6, 7)
+; @sb s10: pevent-switch.in(1, 2, 3, 4, 5, 6, 7)
+; @sb s11: pevent-switch.out(1, 2, 3, 4, 5, 6, 7)
+; @sb s12: pevent-switch-cpu-wide.in(8, 9, 1, 2, 3, 4, 5, 6, 7)
+; @sb s13: pevent-switch-cpu-wide.out(8, 9, 1, 2, 3, 4, 5, 6, 7)
+
+section text vstart=0xa000 start=0x1000
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0s0 0000000000000000 PERF_RECORD_MMAP 1/1, a000, 1, 0, pevent-dump.bin { 1/1 0 1 cpu-2 3 4 }
+;%0s1 0000000000000000 PERF_RECORD_LOST 3, 8 { 1/2 0 3 cpu-4 5 6 }
+;%0s2 0000000000000000 PERF_RECORD_COMM 1/1, foo { 1/1 0 2 cpu-3 4 5 }
+;%0s3 0000000000000000 PERF_RECORD_COMM.EXEC 2/2, bar { 2/2 0 3 cpu-4 5 6 }
+;%0s4 0000000000000000 PERF_RECORD_EXIT 2/2, 1/1, 0 { 2/2 0 3 cpu-4 5 6 }
+;%0s5 0000000000000000 PERF_RECORD_FORK 2/2, 1/1, 0 { 2/2 0 3 cpu-4 5 6 }
+;%0s6 0000000000000000 PERF_RECORD_AUX a000, 1000, 0 { 2/2 0 3 cpu-4 5 6 }
+;%0s7 0000000000000000 PERF_RECORD_AUX.TRUNCATED b000, 700, 1 { 2/2 0 3 cpu-4 5 6 }
+;%0s8 0000000000000002 PERF_RECORD_ITRACE_START 1/2 { 1/2 2 3 cpu-4 5 6 }
+;%0p1 tsc 2
+;%0p2 psbend
+;%0s9 0000000000000003 PERF_RECORD_LOST_SAMPLES 8 { 1/2 3 4 cpu-5 6 7 }
+;%0s10 0000000000000003 PERF_RECORD_SWITCH.IN { 1/2 3 4 cpu-5 6 7 }
+;%0s11 0000000000000003 PERF_RECORD_SWITCH.OUT { 1/2 3 4 cpu-5 6 7 }
+;%0s12 0000000000000003 PERF_RECORD_SWITCH_CPU_WIDE.IN 8/9 { 1/2 3 4 cpu-5 6 7 }
+;%0s13 0000000000000003 PERF_RECORD_SWITCH_CPU_WIDE.OUT 8/9 { 1/2 3 4 cpu-5 6 7 }
diff --git a/test/pevent/src/pevent-dump_verbose.ptt b/test/pevent/src/pevent-dump_verbose.ptt
new file mode 100644
index 000000000000..d61d886d3c19
--- /dev/null
+++ b/test/pevent/src/pevent-dump_verbose.ptt
@@ -0,0 +1,218 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test dumping all supported perf event packets.
+;
+; Variant: verbose format
+;
+; opt:ptdump --sb:verbose --sb:offset --sb:time
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(tid, time, id, stream, cpu, identifier)
+
+; @pt p0: psb()
+
+; @sb s0: pevent-mmap-section(text, 1, 1, 1, 1, 0, 1, 2, 3, 4)
+; @sb s1: pevent-lost(3, 8, 1, 2, 0, 3, 4, 5, 6)
+; @sb s2: pevent-comm(1, 1, foo, 1, 1, 0, 2, 3, 4, 5)
+; @sb s3: pevent-comm.exec(2, 2, bar, 2, 2, 0, 3, 4, 5, 6)
+; @sb s4: pevent-exit(2, 1, 2, 1, 0, 2, 2, 0, 3, 4, 5, 6)
+; @sb s5: pevent-fork(2, 1, 2, 1, 0, 2, 2, 0, 3, 4, 5, 6)
+; @sb s6: pevent-aux(0xa000, 0x1000, 0, 2, 2, 0, 3, 4, 5, 6)
+; @sb s7: pevent-aux(0xb000, 0x700, 1, 2, 2, 0, 3, 4, 5, 6)
+; @sb s8: pevent-itrace-start(1, 2, 1, 2, 2, 3, 4, 5, 6)
+
+; @pt p1: tsc(2)
+; @pt p2: psbend()
+
+; @sb s9: pevent-lost-samples(8, 1, 2, 3, 4, 5, 6, 7)
+; @sb s10: pevent-switch.in(1, 2, 3, 4, 5, 6, 7)
+; @sb s11: pevent-switch.out(1, 2, 3, 4, 5, 6, 7)
+; @sb s12: pevent-switch-cpu-wide.in(8, 9, 1, 2, 3, 4, 5, 6, 7)
+; @sb s13: pevent-switch-cpu-wide.out(8, 9, 1, 2, 3, 4, 5, 6, 7)
+
+section text vstart=0xa000 start=0x1000
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0s0 0000000000000000 PERF_RECORD_MMAP
+; pid: 1
+; tid: 1
+; addr: a000
+; len: 1
+; pgoff: 0
+; filename: pevent-dump_verbose.bin
+; pid: 1
+; tid: 1
+; time: 0
+; id: 1
+; cpu: 2
+; stream id: 3
+; identifier: 4
+;%0s1 0000000000000000 PERF_RECORD_LOST
+; id: 3
+; lost: 8
+; pid: 1
+; tid: 2
+; time: 0
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0s2 0000000000000000 PERF_RECORD_COMM
+; pid: 1
+; tid: 1
+; comm: foo
+; pid: 1
+; tid: 1
+; time: 0
+; id: 2
+; cpu: 3
+; stream id: 4
+; identifier: 5
+;%0s3 0000000000000000 PERF_RECORD_COMM.EXEC
+; pid: 2
+; tid: 2
+; comm: bar
+; pid: 2
+; tid: 2
+; time: 0
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0s4 0000000000000000 PERF_RECORD_EXIT
+; pid: 2
+; ppid: 1
+; tid: 2
+; ptid: 1
+; time: 0
+; pid: 2
+; tid: 2
+; time: 0
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0s5 0000000000000000 PERF_RECORD_FORK
+; pid: 2
+; ppid: 1
+; tid: 2
+; ptid: 1
+; time: 0
+; pid: 2
+; tid: 2
+; time: 0
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0s6 0000000000000000 PERF_RECORD_AUX
+; aux offset: a000
+; aux size: 1000
+; flags: 0
+; pid: 2
+; tid: 2
+; time: 0
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0s7 0000000000000000 PERF_RECORD_AUX.TRUNCATED
+; aux offset: b000
+; aux size: 700
+; flags: 1
+; pid: 2
+; tid: 2
+; time: 0
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0s8 0000000000000002 PERF_RECORD_ITRACE_START
+; pid: 1
+; tid: 2
+; pid: 1
+; tid: 2
+; time: 2
+; id: 3
+; cpu: 4
+; stream id: 5
+; identifier: 6
+;%0p1 tsc 2
+;%0p2 psbend
+;%0s9 0000000000000003 PERF_RECORD_LOST_SAMPLES
+; lost: 8
+; pid: 1
+; tid: 2
+; time: 3
+; id: 4
+; cpu: 5
+; stream id: 6
+; identifier: 7
+;%0s10 0000000000000003 PERF_RECORD_SWITCH.IN
+; pid: 1
+; tid: 2
+; time: 3
+; id: 4
+; cpu: 5
+; stream id: 6
+; identifier: 7
+;%0s11 0000000000000003 PERF_RECORD_SWITCH.OUT
+; pid: 1
+; tid: 2
+; time: 3
+; id: 4
+; cpu: 5
+; stream id: 6
+; identifier: 7
+;%0s12 0000000000000003 PERF_RECORD_SWITCH_CPU_WIDE.IN
+; prev pid: 8
+; prev tid: 9
+; pid: 1
+; tid: 2
+; time: 3
+; id: 4
+; cpu: 5
+; stream id: 6
+; identifier: 7
+;%0s13 0000000000000003 PERF_RECORD_SWITCH_CPU_WIDE.OUT
+; next pid: 8
+; next tid: 9
+; pid: 1
+; tid: 2
+; time: 3
+; id: 4
+; cpu: 5
+; stream id: 6
+; identifier: 7
diff --git a/test/pevent/src/pevent-fork.ptt b/test/pevent/src/pevent-fork.ptt
new file mode 100644
index 000000000000..28cf86743f56
--- /dev/null
+++ b/test/pevent/src/pevent-fork.ptt
@@ -0,0 +1,91 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test FORK in perf_event sideband.
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --event:tick --sb:compact --sb:offset --pevent:kernel-start 0xffffffff80000000
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(kernel, 1, 1, 0x0)
+; @sb s1: pevent-mmap-section(text, 1, 1, 0x0)
+; @sb s2: pevent-itrace-start(1, 1, 0x0)
+
+section kernel vstart=0xffffffff80000000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @sb s3: pevent-fork(2, 1, 2, 1, 2, 2)
+
+; @pt p5: tsc(3)
+; @pt p6: tip(3: %l3)
+l1: iret
+l2: hlt
+
+section text vstart=0x1000 start=0x1010
+l3: nop
+
+; @pt p7: fup(1: %l4)
+; @pt p8: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, ffffffff80000000, 3, 0, pevent-fork.bin { 0 }
+;%0s1 PERF_RECORD_MMAP 1/1, 1000, 2, 10, pevent-fork.bin { 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0s3 PERF_RECORD_FORK 2/2, 1/1, 2 { 2 }
+;%0p5 tsc 3
+;%0p6 tip 3: %?l3
+;%0p7 fup 1: %?l4.2
+;%0p8 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, ffffffff80000000, 3, 0, pevent-fork.bin { 0 }
+;%0s1 PERF_RECORD_MMAP 1/1, 1000, 2, 10, pevent-fork.bin { 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # iret
+;[tick]
+;%0s3 PERF_RECORD_FORK 2/2, 1/1, 2 { 2 }
+;%0l3 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-mmap-tip_cached.ptt b/test/pevent/src/pevent-mmap-tip_cached.ptt
new file mode 100644
index 000000000000..2aef5af0f02b
--- /dev/null
+++ b/test/pevent/src/pevent-mmap-tip_cached.ptt
@@ -0,0 +1,93 @@
+; Copyright (c) 2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test github issue #34.
+;
+; When the block decoder switched images it forgot to update the block's isid.
+; When the next block was decoded from the cache it would get a zero isid.
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --sb:compact --sb:offset
+;
+
+org 0x1000
+bits 64
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(text, 1, 1, 0x0)
+; @sb s1: pevent-mmap-section(foo, 1, 1, 0x0)
+; @sb s2: pevent-itrace-start(1, 1, 0x0)
+
+section text vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: tsc(0x1)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: tip(1: %l5)
+; @pt p6: tip(1: %l0)
+; @pt p7: tip(1: %l3)
+l1: jmp rax
+l2: hlt
+
+l3: nop
+
+; @pt p8: fup(1: %l4)
+; @pt p9: tip.pgd(0: %l4)
+l4: hlt
+
+section foo vstart=0xa000 start=0x1100
+l5: ret
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 6, 0, pevent-mmap-tip_cached.bin { 0 }
+;%0s1 PERF_RECORD_MMAP 1/1, a000, 1, 100, pevent-mmap-tip_cached.bin { 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # jmp rax
+;%0l5 # ret
+;%0l0 # nop
+;%0l1 # jmp rax
+;%0l3 # nop
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 6, 0, pevent-mmap-tip_cached.bin { 0 }
+;%0s1 PERF_RECORD_MMAP 1/1, a000, 1, 100, pevent-mmap-tip_cached.bin { 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p3 tsc 1
+;%0p4 psbend
+;%0p5 tip 1: %?l5.2
+;%0p6 tip 1: %?l0.2
+;%0p7 tip 1: %?l3.2
+;%0p8 fup 1: %?l4.2
+;%0p9 tip.pgd 0: %?l4.0
diff --git a/test/pevent/src/pevent-mmap_secondary-tsc.ptt b/test/pevent/src/pevent-mmap_secondary-tsc.ptt
new file mode 100644
index 000000000000..54d8db4161fa
--- /dev/null
+++ b/test/pevent/src/pevent-mmap_secondary-tsc.ptt
@@ -0,0 +1,101 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MMAP in perf_event sideband.
+;
+; Variant: ring-3 tracing
+; mmap in secondary sideband (other cpu)
+; perfect timing information
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --event:tick --sb:compact --sb:offset
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(text, 1, 1, 0x0)
+; @sb s1: pevent-itrace-start(1, 1, 0x0)
+
+section text vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @sb secondary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s2: pevent-mmap-section(foo, 1, 1, 0x2)
+
+; @pt p5: tsc(0x3)
+; @pt p6: tip(1: %l5)
+l1: call rax ; l5
+;
+; compressed return
+; @pt p7: tnt(t)
+;
+l2: nop
+
+; @pt p8: fup(1: %l3)
+; @pt p9: tip.pgd(0: %l4)
+l3: nop
+l4: hlt
+
+section foo vstart=0xa000 start=0x1100
+l5: ret
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 6, 0, pevent-mmap_secondary-tsc.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0s2 PERF_RECORD_MMAP 1/1, a000, 1, 100, pevent-mmap_secondary-tsc.bin { 2 }
+;%0p5 tsc 3
+;%0p6 tip 1: %?l5.2
+;%0p7 tnt.8 !
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 6, 0, pevent-mmap_secondary-tsc.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # call rax - l5
+;[tick]
+;%0s2 PERF_RECORD_MMAP 1/1, a000, 1, 100, pevent-mmap_secondary-tsc.bin { 2 }
+;%0l5 # ret
+;%0l2 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-split.ptt b/test/pevent/src/pevent-split.ptt
new file mode 100644
index 000000000000..0c5be1e88e7b
--- /dev/null
+++ b/test/pevent/src/pevent-split.ptt
@@ -0,0 +1,135 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we handle section splits correctly.
+;
+; There is a corner case when a linear sequence of instructions that jumps
+; back to the section in which it begins is split by adding a new section
+; in the middle of this sequence.
+;
+; If we do not handle this correctly, we may leave a stale block cache entry
+; that jumps over the newly added section and back into the original section,
+; causing us to miss the change that the new section made to this sequence.
+;
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --event:tick --sb:compact --sb:offset
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(text, 1, 1, 0x0)
+; @sb s1: pevent-itrace-start(1, 1, 0x0)
+
+section text vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+l1: nop
+l2: nop
+l3: nop
+
+; @pt p5: tip(1: %l6)
+l4: jmp rax
+l5: hlt
+
+l6: nop
+l7: nop
+l8: nop
+l9: nop
+l10: nop
+l11: nop
+l12: nop
+
+l13: jmp l0
+l14: hlt
+
+; @pt p6: tsc(0x2)
+; @pt p7: tip(1: %l6)
+; @sb s2: pevent-mmap-section(foo, 1, 1, 0x2)
+
+; @pt p8: tip(1: %l15)
+
+; @pt p9: fup(1: %l16)
+; @pt p10: tip.pgd(0: %l16)
+l15: nop
+l16: hlt
+
+section foo vstart=0x1008 start=0x1100
+l17: jmp rax
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 13, 0, pevent-split.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0p5 tip 1: %?l6.2
+;%0s2 PERF_RECORD_MMAP 1/1, 1008, 2, 100, pevent-split.bin { 2 }
+;%0p6 tsc 2
+;%0p7 tip 1: %?l6.2
+;%0p8 tip 1: %?l15.2
+;%0p9 fup 1: %?l16.2
+;%0p10 tip.pgd 0: %?l16.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 13, 0, pevent-split.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # nop
+;%0l2 # nop
+;%0l3 # nop
+;%0l4 # jmp rax
+;%0l6 # nop
+;%0l7 # nop
+;%0l8 # nop
+;%0l9 # nop
+;%0l10 # nop
+;%0l11 # nop
+;%0l12 # nop
+;%0l13 # jmp l0
+;%0l0 # nop
+;%0l1 # nop
+;%0l2 # nop
+;%0l3 # nop
+;%0l4 # jmp rax
+;[tick]
+;%0s2 PERF_RECORD_MMAP 1/1, 1008, 2, 100, pevent-split.bin { 2 }
+;%0l6 # nop
+;%0l17 # jmp rax
+;%0l15 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.ptt b/test/pevent/src/pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.ptt
new file mode 100644
index 000000000000..cc3dc238cb1c
--- /dev/null
+++ b/test/pevent/src/pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.ptt
@@ -0,0 +1,98 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test COMM.EXEC in perf_event sideband.
+;
+; Variant: ring-3 tracing
+; perfect timing information
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --sb:compact --sb:offset
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(text_1, 1, 1, 0x0)
+; @sb s1: pevent-itrace-start(1, 1, 0x0)
+
+section text_1 vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: tip.pgd(0: %l2)
+l1: syscall
+l2: hlt
+
+; @sb s2: pevent-comm.exec(1, 1, "foo", 0x2)
+; @sb s3: pevent-mmap-section(text_2, 1, 1, 0x2)
+
+section text_2 vstart=0x2000 start=0x1010
+; @pt p6: tsc(0x3)
+; @pt p7: tip.pge(3: %l3)
+l3: nop
+
+; @pt p8: fup(1: %l4)
+; @pt p9: tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 4, 0, pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l2.0
+;%0s2 PERF_RECORD_COMM.EXEC 1/1, "foo" { 2 }
+;%0s3 PERF_RECORD_MMAP 1/1, 2000, 3, 10, pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.bin { 2 }
+;%0p6 tsc 3
+;%0p7 tip.pge 3: %?l3
+;%0p8 fup 1: %?l4.2
+;%0p9 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 4, 0, pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # syscall
+;[disabled]
+;[enabled]
+;%0s2 PERF_RECORD_COMM.EXEC 1/1, "foo" { 2 }
+;%0s3 PERF_RECORD_MMAP 1/1, 2000, 3, 10, pevent-tip_pgd-comm_exec-mmap-tsc-tip_pge.bin { 2 }
+;%0l3 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-tip_pgd-mmap-tsc-tip_pge.ptt b/test/pevent/src/pevent-tip_pgd-mmap-tsc-tip_pge.ptt
new file mode 100644
index 000000000000..a1a3d0a0600c
--- /dev/null
+++ b/test/pevent/src/pevent-tip_pgd-mmap-tsc-tip_pge.ptt
@@ -0,0 +1,104 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MMAP in perf_event sideband.
+;
+; Variant: ring-3 tracing
+; perfect timing information
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --sb:compact --sb:offset
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(text, 1, 1, 0x0)
+; @sb s1: pevent-itrace-start(1, 1, 0x0)
+
+section text vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: tip.pgd(0: %l5)
+l1: syscall
+
+; @sb s2: pevent-mmap-section(foo, 1, 1, 0x2)
+
+; @pt p6: tsc(0x3)
+; @pt p7: tip.pge(3: %l2)
+l2: call l6
+;
+; compressed return
+; @pt p8: tnt(t)
+;
+l3: nop
+
+; @pt p9: fup(1: %l4)
+; @pt p10: tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+section foo vstart=0xa000 start=0x1100
+l6: ret
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, b, 0, pevent-tip_pgd-mmap-tsc-tip_pge.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l5.0
+;%0s2 PERF_RECORD_MMAP 1/1, a000, 1, 100, pevent-tip_pgd-mmap-tsc-tip_pge.bin { 2 }
+;%0p6 tsc 3
+;%0p7 tip.pge 3: %?l2
+;%0p8 tnt.8 !
+;%0p9 fup 1: %?l4.2
+;%0p10 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, b, 0, pevent-tip_pgd-mmap-tsc-tip_pge.bin { 0 }
+;%0s1 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0l0 # nop
+;%0l1 # syscall
+;[disabled]
+;[resumed]
+;%0s2 PERF_RECORD_MMAP 1/1, a000, 1, 100, pevent-tip_pgd-mmap-tsc-tip_pge.bin { 2 }
+;%0l2 # call l6
+;%0l6 # ret
+;%0l3 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-tip_pgd-switch-tsc-tip_pge.ptt b/test/pevent/src/pevent-tip_pgd-switch-tsc-tip_pge.ptt
new file mode 100644
index 000000000000..3817cbf362b1
--- /dev/null
+++ b/test/pevent/src/pevent-tip_pgd-switch-tsc-tip_pge.ptt
@@ -0,0 +1,110 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test SWITCH in perf_event sideband.
+;
+; Variant: ring-3 tracing
+; perfect timing information
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --sb:compact --sb:offset --sb:switch
+;
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(tid, time)
+; @sb s0: pevent-mmap-section(text_1, 1, 1, 1, 1, 0x0)
+; @sb s1: pevent-mmap-section(text_2, 2, 2, 2, 2, 0x0)
+; @sb s2: pevent-itrace-start(1, 1, 1, 1, 0x0)
+
+section text_1 vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(0: %l2)
+l1: nop
+l2: hlt
+
+; @sb s3: pevent-switch.out(1, 1, 0x2)
+; @sb s4: pevent-switch.in(3, 3, 0x3)
+; @sb s5: pevent-switch.out(3, 3, 0x2)
+; @sb s6: pevent-switch.in(2, 2, 0x3)
+
+section text_2 vstart=0x2000 start=0x1010
+; @pt p7: tsc(0x4)
+; @pt p8: tip.pge(3: %l3)
+l3: nop
+
+; @pt p9: fup(1: %l4)
+; @pt p10: tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 3, 0, pevent-tip_pgd-switch-tsc-tip_pge.bin { 1/1 0 }
+;%0s1 PERF_RECORD_MMAP 2/2, 2000, 3, 10, pevent-tip_pgd-switch-tsc-tip_pge.bin { 2/2 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 1/1 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 0: %?l2.0
+;%0s3 PERF_RECORD_SWITCH.OUT { 1/1 2 }
+;%0s4 PERF_RECORD_SWITCH.IN { 3/3 3 }
+;%0s5 PERF_RECORD_SWITCH.OUT { 3/3 2 }
+;%0s6 PERF_RECORD_SWITCH.IN { 2/2 3 }
+;%0p7 tsc 4
+;%0p8 tip.pge 3: %?l3
+;%0p9 fup 1: %?l4.2
+;%0p10 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 3, 0, pevent-tip_pgd-switch-tsc-tip_pge.bin { 1/1 0 }
+;%0s1 PERF_RECORD_MMAP 2/2, 2000, 3, 10, pevent-tip_pgd-switch-tsc-tip_pge.bin { 2/2 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 1/1 0 }
+;[context: pid-1]
+;%0l0 # nop
+;[disabled]
+;[enabled]
+;%0s3 PERF_RECORD_SWITCH.OUT { 1/1 2 }
+;%0s4 PERF_RECORD_SWITCH.IN { 3/3 3 }
+;%0s5 PERF_RECORD_SWITCH.OUT { 3/3 2 }
+;%0s6 PERF_RECORD_SWITCH.IN { 2/2 3 }
+;[context: pid-2]
+;%0l3 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.ptt b/test/pevent/src/pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.ptt
new file mode 100644
index 000000000000..ea38d69d1d8a
--- /dev/null
+++ b/test/pevent/src/pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.ptt
@@ -0,0 +1,109 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test SWITCH_CPU_WIDE in perf_event sideband.
+;
+; Variant: ring-3 tracing
+; timing is off - delay the event application
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --event:tick --sb:compact --sb:offset --sb:switch
+;
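+; A rough sketch, grounded in the expected output below: the switch
+; records s3/s4 carry timestamps 0x2 and 0x3, between the trace's
+; tsc 0x1 and tsc 0x3, so ptxed presumably applies them only once
+; trace time has caught up, i.e. after the [tick] at the second tsc.
+;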
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-mmap-section(text_1, 1, 1, 0x0)
+; @sb s1: pevent-mmap-section(text_2, 2, 2, 0x0)
+; @sb s2: pevent-itrace-start(1, 1, 0x0)
+
+section text_1 vstart=0x1000 start=0x1000
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(0x1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: call rax
+l1: hlt
+
+; @sb s3: pevent-switch-cpu-wide.out(2, 2, 0x2)
+; @sb s4: pevent-switch-cpu-wide.in(1, 1, 0x3)
+
+; @pt p5: tsc(0x3)
+; @pt p6: tip(1: %l2)
+l2: nop
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l3)
+l3: hlt
+
+section text_2 vstart=0x2000 start=0x1010
+; @pt p9: tip.pge(3: %l4)
+l4: nop
+
+; @pt p10: fup(1: %l5)
+; @pt p11: tip.pgd(0: %l5)
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 5, 0, pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.bin { 0 }
+;%0s1 PERF_RECORD_MMAP 2/2, 2000, 2, 10, pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.bin { 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0s3 PERF_RECORD_SWITCH_CPU_WIDE.OUT 2/2 { 2 }
+;%0s4 PERF_RECORD_SWITCH_CPU_WIDE.IN 1/1 { 3 }
+;%0p5 tsc 3
+;%0p6 tip 1: %?l2.2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l3.0
+;%0p9 tip.pge 3: %?l4
+;%0p10 fup 1: %?l5.2
+;%0p11 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0s0 PERF_RECORD_MMAP 1/1, 1000, 5, 0, pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.bin { 0 }
+;%0s1 PERF_RECORD_MMAP 2/2, 2000, 2, 10, pevent-tip_pgd-switch_cpu_wide-tsc-tip_pge.bin { 0 }
+;%0s2 PERF_RECORD_ITRACE_START 1/1 { 0 }
+;[context: pid-1]
+;%0l0 # call rax
+;[tick]
+;%0s3 PERF_RECORD_SWITCH_CPU_WIDE.OUT 2/2 { 2 }
+;%0s4 PERF_RECORD_SWITCH_CPU_WIDE.IN 1/1 { 3 }
+;%0l2 # nop
+;[disabled]
+;[context: pid-2]
+;[enabled]
+;%0l4 # nop
+;[disabled]
diff --git a/test/pevent/src/pevent-warn.ptt b/test/pevent/src/pevent-warn.ptt
new file mode 100644
index 000000000000..4e903a6c8ea8
--- /dev/null
+++ b/test/pevent/src/pevent-warn.ptt
@@ -0,0 +1,79 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test sideband warnings.
+;
+; opt:ptdump --sb:compact --sb:offset
+; opt:ptxed --sb:warn
+;
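+; With --sb:warn, ptxed reports problems it finds in the sideband instead
+; of silently skipping them: the truncated AUX record, the LOST record,
+; and the mmap records it cannot map to a file section (//anon, [vdso],
+; and the kernel mapping without a kernel image) each produce one of the
+; diagnostics listed in the expected output below.
+;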
+
+org 0x1000
+bits 64
+
+
+; @sb primary(pevent)
+; @sb pevent-sample_type(time)
+; @sb s0: pevent-aux(0x0, 0x200, 1, 0)
+; @sb s1: pevent-lost(1, 0x100, 0)
+; @sb s2: pevent-mmap(1, 2, 0xb000, 0x1000, 0, //anon, 0)
+; @sb s3: pevent-mmap(1, 2, 0xa000, 0x1000, 0, [vdso], 0)
+; @sb s4: pevent-mmap(1, 2, 0xa000, 0x1000, 0, [kernel.kallsyms]_text, 0)
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: tsc(1)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(0: %l1)
+l1: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0s0 PERF_RECORD_AUX.TRUNCATED 0, 200, 1 { 0 }
+;%0s1 PERF_RECORD_LOST 1, 100 { 0 }
+;%0s2 PERF_RECORD_MMAP 1/2, b000, 1000, 0, //anon { 0 }
+;%0s3 PERF_RECORD_MMAP 1/2, a000, 1000, 0, [vdso] { 0 }
+;%0s4 PERF_RECORD_MMAP 1/2, a000, 1000, 0, [kernel.kallsyms]_text { 0 }
+;%0p2 tsc 1
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 0: %?l1.0
+
+
+; @pt .exp(ptxed)
+;[pevent-warn-pevent-primary,sample-type=0x4,time-zero=0x0,time-shift=0x0,time-mult=0x1.sb:%0s0 sideband warning: trace lost]
+;[pevent-warn-pevent-primary,sample-type=0x4,time-zero=0x0,time-shift=0x0,time-mult=0x1.sb:%0s1 sideband warning: sideband lost]
+;[pevent-warn-pevent-primary,sample-type=0x4,time-zero=0x0,time-shift=0x0,time-mult=0x1.sb:%0s2 sideband warning: image section lost]
+;[pevent-warn-pevent-primary,sample-type=0x4,time-zero=0x0,time-shift=0x0,time-mult=0x1.sb:%0s3 sideband error: bad configuration]
+;[pevent-warn-pevent-primary,sample-type=0x4,time-zero=0x0,time-shift=0x0,time-mult=0x1.sb:%0s4 sideband warning: image section lost]
+;%0l0 # nop
+;[disabled]
diff --git a/test/src/apl11.ptt b/test/src/apl11.ptt
new file mode 100644
index 000000000000..a0824a6f2b9a
--- /dev/null
+++ b/test/src/apl11.ptt
@@ -0,0 +1,80 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; APL11: Intel(R) PT OVF Packet May Be Followed by TIP.PGD Packet
+;
+; If Intel PT (Processor Trace) encounters an internal buffer overflow
+; and generates an OVF (Overflow) packet just as IA32_RTIT_CTL (MSR
+; 570H) bit 0 (TraceEn) is cleared, or during a far transfer that
+; causes IA32_RTIT_STATUS.ContextEn[1] (MSR 571H) to be cleared, the
+; OVF may be followed by a TIP.PGD (Target Instruction Pointer - Packet
+; Generation Disable) packet.
+;
+; cpu 6/92
+; cpu 6/95
+;
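+; The cpu directives above select processors affected by this erratum,
+; presumably enabling the corresponding decoder workaround. Judging by
+; the expected ptxed output below, the TIP.PGD at p5 is then treated as
+; part of the overflow rather than as a separate disable event: decoding
+; resumes with [enabled] at the TIP.PGE.
+;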
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: tip.pgd(3: %l1)
+l1: hlt
+
+; @pt p6: mode.exec(64bit)
+; @pt p7: tip.pge(3: %l2)
+l2: nop
+
+; @pt p8: fup(1: %l3)
+; @pt p9: tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 tip.pgd 3: %?l1
+;%0p6 mode.exec cs.l
+;%0p7 tip.pge 3: %?l2
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[enabled]
+;[exec mode: 64-bit]
+;%0l2
+;[disabled]
diff --git a/test/src/apl12-psb.ptt b/test/src/apl12-psb.ptt
new file mode 100644
index 000000000000..0dc28b8ef653
--- /dev/null
+++ b/test/src/apl12-psb.ptt
@@ -0,0 +1,87 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; APL12: Intel(R) PT OVF May Be Followed By An Unexpected FUP Packet.
+;
+; Certain Intel PT (Processor Trace) packets, including FUPs (Flow
+; Update Packets), should be issued only between TIP.PGE (Target IP
+; Packet - Packet Generation Enable) and TIP.PGD (Target IP Packet -
+; Packet Generation Disable) packets. When outside a TIP.PGE/TIP.PGD
+; pair, as a result of IA32_RTIT_STATUS.FilterEn[0] (MSR 571H) being
+; cleared, an OVF (Overflow) packet may be unexpectedly followed by a
+; FUP.
+;
+; cpu 6/92
+; cpu 6/95
+;
+; Variant: Extra FUP followed by PSB+.
+;
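+; Judging by the expected ptxed output below, the decoder presumably
+; drops the extra FUP at p5 and re-synchronizes at the PSB+, so decoding
+; continues with the overflow and the enable at the TIP.PGE (p9).
+;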
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: fup(3: %l1)
+l1: hlt
+
+; @pt p6: psb()
+; @pt p7: mode.exec(64bit)
+; @pt p8: psbend()
+
+; @pt p9: tip.pge(3: %l2)
+l2: nop
+
+; @pt p10: fup(1: %l3)
+; @pt p11: tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 fup 3: %?l1
+;%0p6 psb
+;%0p7 mode.exec cs.l
+;%0p8 psbend
+;%0p9 tip.pge 3: %?l2
+;%0p10 fup 1: %?l3.2
+;%0p11 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[enabled]
+;%0l2
+;[disabled]
diff --git a/test/src/apl12-tip_pge.ptt b/test/src/apl12-tip_pge.ptt
new file mode 100644
index 000000000000..7966e40eee8b
--- /dev/null
+++ b/test/src/apl12-tip_pge.ptt
@@ -0,0 +1,89 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; APL12: Intel(R) PT OVF May Be Followed By An Unexpected FUP Packet.
+;
+; Certain Intel PT (Processor Trace) packets, including FUPs (Flow
+; Update Packets), should be issued only between TIP.PGE (Target IP
+; Packet - Packet Generation Enable) and TIP.PGD (Target IP Packet -
+; Packet Generation Disable) packets. When outside a TIP.PGE/TIP.PGD
+; pair, as a result of IA32_RTIT_STATUS.FilterEn[0] (MSR 571H) being
+; cleared, an OVF (Overflow) packet may be unexpectedly followed by a
+; FUP.
+;
+; cpu 6/92
+; cpu 6/95
+;
+; Variant: Extra FUP followed by TIP.PGE.
+;
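+; Judging by the expected ptxed output below, the decoder presumably
+; drops the extra FUP at p5; the VMCS, PIP, and MODE.EXEC status updates
+; preceding the TIP.PGE are reported as events before tracing is enabled.
+;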
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: fup(3: %l1)
+l1: hlt
+
+; @pt p6: vmcs(0xa000)
+; @pt p7: pip(0xa000)
+; @pt p8: mode.exec(64bit)
+; @pt p9: tip.pge(3: %l2)
+l2: nop
+
+; @pt p10: fup(1: %l3)
+; @pt p11: tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 fup 3: %?l1
+;%0p6 vmcs a000 vmcs 000000000000a000
+;%0p7 pip a000 cr3 000000000000a000
+;%0p8 mode.exec cs.l
+;%0p9 tip.pge 3: %?l2
+;%0p10 fup 1: %?l3.2
+;%0p11 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[vmcs, base: 000000000000a000]
+;[paging, cr3: 000000000000a000]
+;[enabled]
+;[exec mode: 64-bit]
+;%0l2
+;[disabled]
diff --git a/test/src/bad_cpu.ptt b/test/src/bad_cpu.ptt
new file mode 100644
index 000000000000..4a592d5acff6
--- /dev/null
+++ b/test/src/bad_cpu.ptt
@@ -0,0 +1,60 @@
+; Copyright (c) 2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that unknown cpus are diagnosed
+;
+; opt:ptdump --cpu 0/0
+; opt:ptxed --cpu 0/0
+;
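+; Family/model 0/0 does not name any known processor. As the expected
+; output below shows, both tools print a diagnostic but still continue
+; decoding the trace.
+;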
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+l0: nop
+
+; @pt p4: fup(1: %l1)
+; @pt p5: tip.pgd(0: %l1)
+l1: hlt
+
+
+; @pt .exp(ptdump)
+;[0: failed to determine errata: unknown cpu]
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 fup 1: %?l1.2
+;%0p5 tip.pgd 0: %?l1.0
+
+; @pt .exp(ptxed)
+;[0, 0: config error: unknown cpu]
+;%0l0
+;[disabled]
diff --git a/test/src/bdm64-tip-xabort.ptt b/test/src/bdm64-tip-xabort.ptt
new file mode 100644
index 000000000000..060f54f26209
--- /dev/null
+++ b/test/src/bdm64-tip-xabort.ptt
@@ -0,0 +1,97 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be
+; Recorded Following a Transactional Abort.
+;
+; Use of Intel(R) Transactional Synchronization Extensions (Intel(R) TSX)
+; may result in a transactional abort. If an abort occurs immediately
+; following a branch instruction, an incorrect branch target may be
+; logged in an LBR (Last Branch Record) or in an Intel(R) Processor Trace
+; (Intel(R) PT) packet before the LBR or Intel PT packet produced by the
+; abort.
+;
+; cpu 6/61
+; cpu 6/71
+; cpu 6/79
+; cpu 6/86
+;
+; Variant: indirect branch.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: fup(3: %l1)
+; @pt p5: psbend()
+
+l1: jmp [rax]
+l2: hlt
+
+; @pt p6: tip(1: %l3)
+;
+; The branch destination is bad.
+
+l3: hlt
+l4: hlt
+
+; We immediately take an xabort from there
+;
+; @pt p7: mode.tsx(abort)
+; @pt p8: fup(1: %l3)
+; @pt p9: tip(1: %l5)
+
+l5: nop
+
+; @pt p10: fup(1: %l6)
+; @pt p11: tip.pgd(0: %l7)
+l6: nop
+l7: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 fup 3: %0l1
+;%0p5 psbend
+;%0p6 tip 1: %?l3.2
+;%0p7 mode.tsx abrt
+;%0p8 fup 1: %?l3.2
+;%0p9 tip 1: %?l5.2
+;%0p10 fup 1: %?l6.2
+;%0p11 tip.pgd 0: %?l7.0
+
+
+; @pt .exp(ptxed)
+;? %0l1 # jmp [rax]
+;[aborted]
+;[interrupt]
+;%0l5 # nop
+;[disabled]
diff --git a/test/src/bdm64-tnt-cond-xabort.ptt b/test/src/bdm64-tnt-cond-xabort.ptt
new file mode 100644
index 000000000000..f7ac7032b70b
--- /dev/null
+++ b/test/src/bdm64-tnt-cond-xabort.ptt
@@ -0,0 +1,107 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be
+; Recorded Following a Transactional Abort.
+;
+; Use of Intel(R) Transactional Synchronization Extensions (Intel(R) TSX)
+; may result in a transactional abort. If an abort occurs immediately
+; following a branch instruction, an incorrect branch target may be
+; logged in an LBR (Last Branch Record) or in an Intel(R) Processor Trace
+; (Intel(R) PT) packet before the LBR or Intel PT packet produced by the
+; abort.
+;
+; cpu 6/61
+; cpu 6/71
+; cpu 6/79
+; cpu 6/86
+;
+; Variant: conditional branch followed by another conditional branch to
+; tell us that we're on the wrong track.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: fup(3: %l1)
+; @pt p5: psbend()
+
+l1: test [rax], rbx
+l2: je l5
+
+; @pt p6: tnt(n)
+;
+; The branch destination is bad.
+;
+; Without further Intel PT support, there is no way for us to detect
+; this as long as the bad branch destination is reachable.
+;
+; If we cannot reach it, however, we know that the branch was bad.
+;
+l3: nop
+l4: je l9
+
+l5: hlt
+l6: hlt
+
+; We immediately take an xabort from there
+;
+; @pt p7: mode.tsx(abort)
+; @pt p8: fup(1: %l5)
+; @pt p9: tip(1: %l7)
+
+l7: nop
+
+; @pt p10: fup(1: %l8)
+; @pt p11: tip.pgd(0: %l9)
+l8: nop
+l9: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 fup 3: %0l1
+;%0p5 psbend
+;%0p6 tnt.8 .
+;%0p7 mode.tsx abrt
+;%0p8 fup 1: %?l5.2
+;%0p9 tip 1: %?l7.2
+;%0p10 fup 1: %?l8.2
+;%0p11 tip.pgd 0: %?l9.0
+
+
+; @pt .exp(ptxed)
+;? %0l1 # test [rax], rbx
+;? %0l2 # je l5
+;[aborted]
+;[interrupt]
+;%0l7 # nop
+;[disabled]
diff --git a/test/src/bdm64-tnt-ind_call-xabort.ptt b/test/src/bdm64-tnt-ind_call-xabort.ptt
new file mode 100644
index 000000000000..3e007aaeb137
--- /dev/null
+++ b/test/src/bdm64-tnt-ind_call-xabort.ptt
@@ -0,0 +1,107 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be
+; Recorded Following a Transactional Abort.
+;
+; Use of Intel(R) Transactional Synchronization Extensions (Intel(R) TSX)
+; may result in a transactional abort. If an abort occurs immediately
+; following a branch instruction, an incorrect branch target may be
+; logged in an LBR (Last Branch Record) or in an Intel(R) Processor Trace
+; (Intel(R) PT) packet before the LBR or Intel PT packet produced by the
+; abort.
+;
+; cpu 6/61
+; cpu 6/71
+; cpu 6/79
+; cpu 6/86
+;
+; Variant: conditional branch followed by an indirect call to tell us that
+; we're on the wrong track.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: fup(3: %l1)
+; @pt p5: psbend()
+
+l1: test [rax], rbx
+l2: je l5
+
+; @pt p6: tnt(n)
+;
+; The branch destination is bad.
+;
+; Without further Intel PT support, there is no way for us to detect
+; this as long as the bad branch destination is reachable.
+;
+; If we cannot reach it, however, we know that the branch was bad.
+;
+l3: nop
+l4: call rax
+
+l5: hlt
+l6: hlt
+
+; We immediately take an xabort from there
+;
+; @pt p7: mode.tsx(abort)
+; @pt p8: fup(1: %l5)
+; @pt p9: tip(1: %l7)
+
+l7: nop
+
+; @pt p10: fup(1: %l8)
+; @pt p11: tip.pgd(0: %l9)
+l8: nop
+l9: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 fup 3: %0l1
+;%0p5 psbend
+;%0p6 tnt.8 .
+;%0p7 mode.tsx abrt
+;%0p8 fup 1: %?l5.2
+;%0p9 tip 1: %?l7.2
+;%0p10 fup 1: %?l8.2
+;%0p11 tip.pgd 0: %?l9.0
+
+
+; @pt .exp(ptxed)
+;? %0l1 # test [rax], rbx
+;? %0l2 # je l5
+;[aborted]
+;[interrupt]
+;%0l7 # nop
+;[disabled]
diff --git a/test/src/bdm70-psb_fup-tip_pge.ptt b/test/src/bdm70-psb_fup-tip_pge.ptt
new file mode 100644
index 000000000000..160404d7a457
--- /dev/null
+++ b/test/src/bdm70-psb_fup-tip_pge.ptt
@@ -0,0 +1,79 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; BDM70: Intel(R) Processor Trace PSB+ Packets May Contain Unexpected Packets.
+;
+; Some Intel Processor Trace packets should be issued only between
+; TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE
+; packet is generated it may be preceded by a PSB+ that incorrectly
+; includes FUP and MODE.Exec packets.
+;
+; cpu 6/61
+; cpu 6/71
+; cpu 6/79
+; cpu 6/86
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+; Variant: sync at the PSB directly preceding the TIP.PGE.
+;
+; Tracing is already enabled after the sync and the explicit
+; enable event is suppressed as a duplicate.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l1)
+; @pt p4: mode.tsx(begin)
+; @pt p5: psbend()
+l1: nop
+; @pt p6: tip.pge(3: %l1)
+l2: nop
+; @pt p7: fup(1: %l2)
+; @pt p8: tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %0l1
+;%0p4 mode.tsx intx
+;%0p5 psbend
+;%0p6 tip.pge 3: %0l1
+;%0p7 fup 1: %?l2.2
+;%0p8 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[enabled]
+;? %0l1 # nop
+;[disabled]
diff --git a/test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt b/test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt
new file mode 100644
index 000000000000..44e4de528f7f
--- /dev/null
+++ b/test/src/bdm70-tip_pgd-psb_fup-tip_pge.ptt
@@ -0,0 +1,97 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; BDM70: Intel(R) Processor Trace PSB+ Packets May Contain Unexpected Packets.
+;
+; Some Intel Processor Trace packets should be issued only between
+; TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE
+; packet is generated it may be preceded by a PSB+ that incorrectly
+; includes FUP and MODE.Exec packets.
+;
+; cpu 6/61
+; cpu 6/71
+; cpu 6/79
+; cpu 6/86
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+; Variant: sync at an earlier PSB.
+;
+; Process the status updates in the PSB+ directly preceding the TIP.PGE
+; while tracing is still disabled.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: fup(3: %l1)
+; @pt p5: psbend()
+l1: jle l4
+; @pt p6: tip.pgd(1: %l2)
+l2: nop
+
+; @pt p7: psb()
+; @pt p8: mode.exec(64bit)
+; @pt p9: fup(3: %l3)
+; @pt p10: mode.tsx(begin)
+; @pt p11: psbend()
+
+; @pt p12: tip.pge(3: %l3)
+l3: nop
+l4: nop
+; @pt p13: fup(1: %l4)
+; @pt p14: tip.pgd(0: %l5)
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 fup 3: %0l1
+;%0p5 psbend
+;%0p6 tip.pgd 1: %?l2.2
+;%0p7 psb
+;%0p8 mode.exec cs.l
+;%0p9 fup 3: %0l3
+;%0p10 mode.tsx intx
+;%0p11 psbend
+;%0p12 tip.pge 3: %0l3
+;%0p13 fup 1: %?l4.2
+;%0p14 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;? %0l1 # jle l4
+;[disabled]
+;[enabled]
+;? %0l3 # nop
+;[disabled]
diff --git a/test/src/call_direct-ret_compressed-pic.ptt b/test/src/call_direct-ret_compressed-pic.ptt
new file mode 100644
index 000000000000..5c40581f61ae
--- /dev/null
+++ b/test/src/call_direct-ret_compressed-pic.ptt
@@ -0,0 +1,68 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a compressed return for a direct call
+;
+; Variant: ignore a call with zero displacement
+;
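+; A sketch of the expected behavior, assuming the usual return-stack
+; handling: the call at l4 has zero displacement (the common PIC idiom
+; for reading the IP), so it is presumably not pushed onto the decoder's
+; return stack. The ret at l5 then compresses against the return address
+; l2 pushed by the call at l1; the two TNT bits at p5 cover that
+; compressed return and the jz at l2.
+;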
+
+org 0x100000
+bits 32
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(32bit)
+; @pt p4: psbend()
+l1: call l4
+l2: jz l6
+l3: hlt
+
+; @pt p5: tnt(t.t)
+l4: call l5
+l5: ret
+
+; @pt p6: fup(1: %l6)
+; @pt p7: tip.pgd(0: %l7)
+l6: nop
+l7: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %?l1
+;%0p3 mode.exec cs.d
+;%0p4 psbend
+;%0p5 tnt.8 !!
+;%0p6 fup 1: %?l6.2
+;%0p7 tip.pgd 0: %?l7.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l4
+;%0l4 # call .
+;%0l5 # ret
+;%0l2 # jz l6
+;[disabled]
diff --git a/test/src/call_direct-ret_compressed.ptt b/test/src/call_direct-ret_compressed.ptt
new file mode 100644
index 000000000000..d245970db5bd
--- /dev/null
+++ b/test/src/call_direct-ret_compressed.ptt
@@ -0,0 +1,62 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a compressed return for a direct call
+;
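+; A compressed return is reported as a single 'taken' bit in a TNT packet
+; rather than as a TIP packet; the decoder resolves it against the return
+; address it recorded for the call. Here the first bit of the TNT at p5
+; covers the ret at l4 (returning to l2), the second the jz at l2.
+;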
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: call l4
+l2: jz l5
+l3: hlt
+l4: ret
+; @pt p5: tnt(t.t)
+l5: nop
+; @pt p6: fup(1: %l5)
+; @pt p7: tip.pgd(0: 0)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !!
+;%0p6 fup 1: %?l5.2
+;%0p7 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l4
+;%0l4 # ret
+;%0l2 # jz l5
+;[disabled]
diff --git a/test/src/call_direct-ret_uncompressed.ptt b/test/src/call_direct-ret_uncompressed.ptt
new file mode 100644
index 000000000000..72d737ea7d2b
--- /dev/null
+++ b/test/src/call_direct-ret_uncompressed.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a non-compressed return for a direct call
+;
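+; Hardware need not compress every return. Here the ret at l4 is reported
+; with an explicit TIP packet (p5) giving the return target l2, and the
+; decoder is expected to follow that TIP instead of its return stack.
+;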
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: call l4
+l2: nop
+l3: nop
+l4: ret
+; @pt p5: tip(1: %l2)
+; @pt p6: fup(1: %l3)
+; @pt p7: tip.pgd(0: 0)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip 1: %?l2.2
+;%0p6 fup 1: %?l3.2
+;%0p7 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l4
+;%0l4 # ret
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/call_indirect-ret_compressed.ptt b/test/src/call_indirect-ret_compressed.ptt
new file mode 100644
index 000000000000..130cd3f25a05
--- /dev/null
+++ b/test/src/call_indirect-ret_compressed.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a compressed return for an indirect call
+;
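+; The indirect call at l1 is reported with a TIP (p5) giving its target
+; l4; the ret at l4 then compresses to a single 'taken' bit in the TNT
+; at p6, returning to the address l2 recorded for the call.
+;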
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: call rbx
+; @pt p5: tip(1: %l4)
+l2: nop
+l3: hlt
+l4: ret
+; @pt p6: tnt(t)
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: 0)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip 1: %?l4.2
+;%0p6 tnt.8 !
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # call rbx
+;%0l4 # ret
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/call_indirect-ret_uncompressed.ptt b/test/src/call_indirect-ret_uncompressed.ptt
new file mode 100644
index 000000000000..fc3432585920
--- /dev/null
+++ b/test/src/call_indirect-ret_uncompressed.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a non-compressed return for an indirect call
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: call rbx
+; @pt p5: tip(1: %l4)
+l2: nop
+l3: nop
+l4: ret
+; @pt p6: tip(1: %l2)
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: 0)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip 1: %?l4.2
+;%0p6 tip 1: %?l2.2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # call rbx
+;%0l4 # ret
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/call_indirect_deferred-ret_compressed.ptt b/test/src/call_indirect_deferred-ret_compressed.ptt
new file mode 100644
index 000000000000..b0df53bb40ed
--- /dev/null
+++ b/test/src/call_indirect_deferred-ret_compressed.ptt
@@ -0,0 +1,65 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a compressed return for an indirect call with deferred tip
+;
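+; The target packet for the indirect call at l1 is deferred: the TNT at
+; p5, which covers the compressed return at l4 and the jz at l2, precedes
+; the TIP at p6 carrying the call target. As the expected output below
+; shows, the decoder still resolves the call to l4 before following the
+; TNT bits.
+;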
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: call rbx
+; tip deferred
+l2: jz l5
+l3: hlt
+l4: ret
+; @pt p5: tnt(t.t)
+; @pt p6: tip(1: %l4)
+l5: nop
+; @pt p7: fup(1: %l5)
+; @pt p8: tip.pgd(0: 0)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !!
+;%0p6 tip 1: %?l4.2
+;%0p7 fup 1: %?l5.2
+;%0p8 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # call rbx
+;%0l4 # ret
+;%0l2 # jz l5
+;[disabled]
diff --git a/test/src/cbr-cyc.ptt b/test/src/cbr-cyc.ptt
new file mode 100644
index 000000000000..65905e7f5a8d
--- /dev/null
+++ b/test/src/cbr-cyc.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test CYC-based timing.
+;
+; Variant: CBR-based calibration
+;
+; opt:ptdump --time --time-delta --no-wall-clock
+; opt:ptdump --nom-freq 4
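+;
+; A worked sketch of the expected deltas, assuming cycles are converted
+; to TSC ticks with a factor of nom-freq / cbr:
+;   cbr 2 -> factor 4/2 = 2: cyc 3 -> +6, cyc 1 -> +2
+;   cbr 1 -> factor 4/1 = 4: cyc 2 -> +8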
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: cbr(0x2)
+; @pt p3: psbend()
+
+; @pt p4: cyc(0x3)
+; @pt p5: cyc(0x1)
+
+; @pt p6: cbr(0x1)
+; @pt p7: cyc(0x2)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 cbr 2
+;%0p3 psbend
+;%0p4 cyc 3 tsc +6
+;%0p5 cyc 1 tsc +2
+;%0p6 cbr 1
+;%0p7 cyc 2 tsc +8
diff --git a/test/src/cbr-mtc-cyc-mtc.ptt b/test/src/cbr-mtc-cyc-mtc.ptt
new file mode 100644
index 000000000000..000ca36400be
--- /dev/null
+++ b/test/src/cbr-mtc-cyc-mtc.ptt
@@ -0,0 +1,54 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based timing.
+;
+; Variant: CBR-based calibration, time correction on MTC
+;
+; opt:ptdump --time --time-delta --no-wall-clock
+; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
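+;
+; A worked sketch of the expected deltas: with cbr 2 and nom-freq 4,
+; cycles convert at a factor of 2 (cyc 3 -> +6, cyc 1 -> +2). With
+; mtc-freq 0 and a TSC/CTC ratio of 4/1, one MTC tick spans 4 TSC ticks,
+; so the mtc at p7 presumably corrects the 8 ticks estimated from CYC
+; back down by 4, shown as -4.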
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: cbr(0x2)
+; @pt p3: psbend()
+
+; @pt p4: mtc(0x2)
+; @pt p5: cyc(0x3)
+; @pt p6: cyc(0x1)
+; @pt p7: mtc(0x3)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 cbr 2
+;%0p3 psbend
+;%0p4 mtc 2 tsc +0
+;%0p5 cyc 3 tsc +6
+;%0p6 cyc 1 tsc +2
+;%0p7 mtc 3 tsc -4
diff --git a/test/src/cbr-tsc-cyc-tma.ptt b/test/src/cbr-tsc-cyc-tma.ptt
new file mode 100644
index 000000000000..17f3944f0990
--- /dev/null
+++ b/test/src/cbr-tsc-cyc-tma.ptt
@@ -0,0 +1,57 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration,
+; CYC between TSC and TMA
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 1 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
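+;
+; A worked sketch of the expected deltas: with cbr 2 and nom-freq 1,
+; cycles convert at a factor of 1/2 (cyc 6 -> +3, cyc 8 -> +4,
+; cyc 4 -> +2). The TSC packet supplies wall-clock time (+a0000);
+; the TMA only updates calibration and adds no time of its own (+0).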
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: cbr(0x2)
+; @pt p3: psbend()
+
+; @pt p4: tsc(0xa0000)
+; @pt p5: cyc(0x6)
+; @pt p6: tma(0x102, 0x8)
+; @pt p7: cyc(0x8)
+; @pt p8: cyc(0x4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 cbr 2
+;%0p3 psbend
+;%0p4 tsc a0000 tsc +a0000
+;%0p5 cyc 6 tsc +3
+;%0p6 tma 102, 8 tsc +0
+;%0p7 cyc 8 tsc +4
+;%0p8 cyc 4 tsc +2
diff --git a/test/src/cbr-tsc-tma-mtc-cyc.ptt b/test/src/cbr-tsc-tma-mtc-cyc.ptt
new file mode 100644
index 000000000000..97b657ccade9
--- /dev/null
+++ b/test/src/cbr-tsc-tma-mtc-cyc.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration, CBR before TSC
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
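+;
+; A worked sketch of the expected deltas: with cbr 2 and nom-freq 4,
+; cycles convert at a factor of 2 (cyc 3 -> +6, cyc 1 -> +2). For the
+; mtc at p6: with mtc-freq 8 its payload gives CTC 0x200; relative to
+; the TMA's CTC 0x102 and fast counter 8, and a TSC/CTC ratio of 4/1,
+; that is (0x200 - 0x102) * 4 - 8 = 0x3f0, matching the +3f0 below.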
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: cbr(0x2)
+; @pt p4: tsc(0xa0000)
+; @pt p5: tma(0x102, 0x8)
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x3)
+; @pt p8: cyc(0x1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 cbr 2
+;%0p4 tsc a0000 tsc +a0000
+;%0p5 tma 102, 8 tsc +0
+;%0p6 mtc 2 tsc +3f0
+;%0p7 cyc 3 tsc +6
+;%0p8 cyc 1 tsc +2
diff --git a/test/src/direct_call-tip_pgd_noip-syscall.ptt b/test/src/direct_call-tip_pgd_noip-syscall.ptt
new file mode 100644
index 000000000000..9f058039ff14
--- /dev/null
+++ b/test/src/direct_call-tip_pgd_noip-syscall.ptt
@@ -0,0 +1,60 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD with suppressed IP payload is applied to the next far branch
+; (syscall in this case).
+;
+; Variant: there's a direct call on our way to the syscall.
+; Test that the disable event is not applied too early.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: call l3
+l2: hlt
+
+l3: syscall
+l4: ret
+; @pt p5: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l3
+;%0l3 # syscall
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l4.0
diff --git a/test/src/direct_jump-tip_pgd_noip-far_call.ptt b/test/src/direct_jump-tip_pgd_noip-far_call.ptt
new file mode 100644
index 000000000000..b8fe8e22edab
--- /dev/null
+++ b/test/src/direct_jump-tip_pgd_noip-far_call.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD with suppressed IP payload is applied to the next far branch
+; (far call in this case).
+;
+; Variant: there's a direct jump on our way to the far call.
+; Test that the disable event is not applied too early.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jmp l3
+l2: hlt
+
+l3: call far [rax] ; l5
+l4: hlt
+; @pt p5: tip.pgd(0: %l5)
+
+l5: hlt
+
+; @pt .exp(ptxed)
+;%0l1 # jmp l3
+;%0l3 # call far [rax] ; l5
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l5.0
diff --git a/test/src/dump-all-packets.ptt b/test/src/dump-all-packets.ptt
new file mode 100644
index 000000000000..a3f11a68c318
--- /dev/null
+++ b/test/src/dump-all-packets.ptt
@@ -0,0 +1,143 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test dumping of all packets.
+;
+; opt:ptdump --no-sync
+
+org 0x42
+bits 64
+
+; @pt p01: psbend()
+; @pt p02: psb()
+; @pt p03: pad()
+; @pt p04: ovf()
+; @pt p05: tnt(tnnnt)
+; @pt p06: tnt64(tnntnntnnttnntnntnnttnntnntnnttnntnntnnttnnttnt)
+; @pt p07: tip(3: 0xffffffffffff1000)
+; @pt p08: tip(3: 0x1000)
+; @pt p09: tip(2: 0x1001)
+; @pt p10: tip(1: 0x1002)
+; @pt p11: tip(0: 0x1003)
+; @pt p12: tip.pge(3: 0xffffffffffff2000)
+; @pt p13: tip.pge(3: 0x2000)
+; @pt p14: tip.pge(2: 0x2001)
+; @pt p15: tip.pge(1: 0x2002)
+; @pt p16: tip.pge(0: 0x2003)
+; @pt p17: tip.pgd(3: 0xffffffffffff3000)
+; @pt p18: tip.pgd(3: 0x3000)
+; @pt p19: tip.pgd(2: 0x3001)
+; @pt p20: tip.pgd(1: 0x3002)
+; @pt p21: tip.pgd(0: 0x3003)
+; @pt p22: fup(3: 0xffffffffffff4000)
+; @pt p23: fup(3: 0x4000)
+; @pt p24: fup(2: 0x4001)
+; @pt p25: fup(1: 0x4002)
+; @pt p26: fup(0: 0x4003)
+; @pt p27: mode.exec(16bit)
+; @pt p28: mode.exec(32bit)
+; @pt p29: mode.exec(64bit)
+; @pt p30: mode.tsx(begin)
+; @pt p31: mode.tsx(commit)
+; @pt p32: mode.tsx(abort)
+; @pt p33: pip(0xafafaf)
+; @pt p34: pip(0xafafaf, nr)
+; @pt p35: tsc(0x12345)
+; @pt p36: cbr(0x24)
+; @pt p37: tma(0x12, 0x34)
+; @pt p38: mtc(0x94)
+; @pt p39: cyc(0x57)
+; @pt p40: stop()
+; @pt p41: vmcs(0xabcd000)
+; @pt p42: mnt(0x2a2242e5d4c3b2a1)
+; @pt p43: tip(6: 0x00cccccccccc4000)
+; @pt p44: tip(4: 0xffffffff4000)
+; @pt p45: exstop()
+; @pt p46: exstop(ip)
+; @pt p47: mwait(0xb, 0x1)
+; @pt p48: pwre(c0.0)
+; @pt p49: pwre(c0.0, hw)
+; @pt p50: pwrx(int: c1, c6)
+; @pt p51: pwrx(st: c1, c6)
+; @pt p52: pwrx(hw: c1, c6)
+; @pt p53: ptw(0: 0x90abcdef, ip)
+; @pt p54: ptw(1: 0x1234567890abcdef)
+
+; @pt .exp(ptdump)
+;%0p01 psbend
+;%0p02 psb
+;%0p03 pad
+;%0p04 ovf
+;%0p05 tnt.8 !...!
+;%0p06 tnt.64 !..!..!..!!..!..!..!!..!..!..!!..!..!..!!..!!.!
+;%0p07 tip 3: ffffffffffff1000
+;%0p08 tip 3: 0000000000001000
+;%0p09 tip 2: ????????00001001
+;%0p10 tip 1: ????????????1002
+;%0p11 tip 0: ????????????????
+;%0p12 tip.pge 3: ffffffffffff2000
+;%0p13 tip.pge 3: 0000000000002000
+;%0p14 tip.pge 2: ????????00002001
+;%0p15 tip.pge 1: ????????????2002
+;%0p16 tip.pge 0: ????????????????
+;%0p17 tip.pgd 3: ffffffffffff3000
+;%0p18 tip.pgd 3: 0000000000003000
+;%0p19 tip.pgd 2: ????????00003001
+;%0p20 tip.pgd 1: ????????????3002
+;%0p21 tip.pgd 0: ????????????????
+;%0p22 fup 3: ffffffffffff4000
+;%0p23 fup 3: 0000000000004000
+;%0p24 fup 2: ????????00004001
+;%0p25 fup 1: ????????????4002
+;%0p26 fup 0: ????????????????
+;%0p27 mode.exec
+;%0p28 mode.exec cs.d
+;%0p29 mode.exec cs.l
+;%0p30 mode.tsx intx
+;%0p31 mode.tsx
+;%0p32 mode.tsx abrt
+;%0p33 pip afafa0 cr3 0000000000afafa0
+;%0p34 pip afafa0, nr cr3 0000000000afafa0
+;%0p35 tsc 12345
+;%0p36 cbr 24
+;%0p37 tma 12, 34
+;%0p38 mtc 94
+;%0p39 cyc 57
+;%0p40 stop
+;%0p41 vmcs abcd000 vmcs 000000000abcd000
+;%0p42 mnt 2a2242e5d4c3b2a1
+;%0p43 tip 6: 00cccccccccc4000
+;%0p44 tip 4: ????ffffffff4000
+;%0p45 exstop
+;%0p46 exstop ip
+;%0p47 mwait 0000000b, 00000001
+;%0p48 pwre c0.0
+;%0p49 pwre c0.0, hw
+;%0p50 pwrx int: c1, c6
+;%0p51 pwrx st: c1, c6
+;%0p52 pwrx hw: c1, c6
+;%0p53 ptw 0: 90abcdef, ip
+;%0p54 ptw 1: 1234567890abcdef
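
The expected ptdump lines above correspond one-to-one to the packets encoded by the @pt directives. As a rough sketch of how such a raw stream can be walked programmatically, the packet decoder could be used as below; buf and size stand for the encoded trace buffer, and error handling is reduced to a minimum:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    #include "intel-pt.h"

    /* Dump all packets of a raw trace buffer, starting at offset 0 as
     * with ptdump --no-sync; buf/size are placeholders.
     */
    static int dump_packets(uint8_t *buf, size_t size)
    {
        struct pt_packet_decoder *decoder;
        struct pt_config config;
        int status;

        memset(&config, 0, sizeof(config));
        config.size = sizeof(config);
        config.begin = buf;
        config.end = buf + size;

        decoder = pt_pkt_alloc_decoder(&config);
        if (!decoder)
            return -pte_nomem;

        /* The test passes --no-sync, i.e. decode from the start of the
         * buffer instead of searching for a PSB first.
         */
        status = pt_pkt_sync_set(decoder, 0ull);
        while (status >= 0) {
            struct pt_packet packet;
            uint64_t offset;

            status = pt_pkt_get_offset(decoder, &offset);
            if (status < 0)
                break;

            status = pt_pkt_next(decoder, &packet, sizeof(packet));
            if (status < 0)
                break;

            printf("%016" PRIx64 "  type %d, %u bytes\n", offset,
                   (int) packet.type, (unsigned int) packet.size);
        }

        pt_pkt_free_decoder(decoder);
        return status;
    }
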
diff --git a/test/src/exstop_ip-tip_pgd.ptt b/test/src/exstop_ip-tip_pgd.ptt
new file mode 100644
index 000000000000..24c413eda887
--- /dev/null
+++ b/test/src/exstop_ip-tip_pgd.ptt
@@ -0,0 +1,65 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a power event session.
+;
+; Variant: tracing is disabled at the same IP as the power session.
+;
+; opt:ptxed --event:ip
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: exstop(ip)
+; @pt p5:fup(1: %l1)
+l1: nop
+
+; @pt p6:fup(1: %l1)
+; @pt p7:tip.pgd(0: %l2)
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 exstop ip
+;%0p5 fup 1: %?l1.2
+;%0p6 fup 1: %?l1.2
+;%0p7 tip.pgd 0: %?l2.0
+
+; @pt .exp(ptxed)
+;%0l0
+;[exstop, ip: %0l1]
+;[disabled, at: %0l1]
diff --git a/test/src/fup-pip-vmcs-tip.ptt b/test/src/fup-pip-vmcs-tip.ptt
new file mode 100644
index 000000000000..2f683337427f
--- /dev/null
+++ b/test/src/fup-pip-vmcs-tip.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a PIP and VMCS binding to an in-flight asynchronous branch.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: fup(1: %l1)
+l1: hlt
+
+; @pt p5: pip(0xcdcdc0)
+; @pt p6: vmcs(0xcdcdc000)
+; @pt p7: tip(3: %l2)
+l2: nop
+
+; @pt p8:fup(1: %l3)
+; @pt p9:tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l1.2
+;%0p5 pip cdcdc0 cr3 0000000000cdcdc0
+;%0p6 vmcs cdcdc000 vmcs 00000000cdcdc000
+;%0p7 tip 3: %?l2
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[interrupt]
+;[vmcs, base: 00000000cdcdc000]
+;[paging, cr3: 0000000000cdcdc0]
+;%0l2
+;[disabled]
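
The [paging, ...] and [vmcs, ...] lines in the expected ptxed output are events that the query layer reports alongside branch information. A minimal sketch of draining pending events with the query decoder is shown below, assuming buf/size hold the encoded trace; a complete decoder would interleave this with branch queries and instruction decode:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    #include "intel-pt.h"

    /* Drain events pending after synchronization; buf/size are
     * placeholders for the encoded trace.
     */
    static int list_events(uint8_t *buf, size_t size)
    {
        struct pt_query_decoder *decoder;
        struct pt_config config;
        uint64_t ip;
        int status;

        memset(&config, 0, sizeof(config));
        config.size = sizeof(config);
        config.begin = buf;
        config.end = buf + size;

        decoder = pt_qry_alloc_decoder(&config);
        if (!decoder)
            return -pte_nomem;

        status = pt_qry_sync_forward(decoder, &ip);
        while ((status >= 0) && (status & pts_event_pending)) {
            struct pt_event event;

            status = pt_qry_event(decoder, &event, sizeof(event));
            if (status < 0)
                break;

            switch (event.type) {
            case ptev_async_paging:
                printf("paging, cr3: %016" PRIx64 "\n",
                       event.variant.async_paging.cr3);
                break;
            case ptev_async_vmcs:
                printf("vmcs, base: %016" PRIx64 "\n",
                       event.variant.async_vmcs.base);
                break;
            default:
                printf("event type %d\n", (int) event.type);
                break;
            }
        }

        pt_qry_free_decoder(decoder);
        return status;
    }
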
diff --git a/test/src/fup-pip-vmcs-tip_pgd.ptt b/test/src/fup-pip-vmcs-tip_pgd.ptt
new file mode 100644
index 000000000000..47b10f390762
--- /dev/null
+++ b/test/src/fup-pip-vmcs-tip_pgd.ptt
@@ -0,0 +1,65 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a PIP and VMCS binding to an in-flight asynchronous branch.
+;
+; Variant: the async branch disables tracing
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: fup(1: %l1)
+l1: hlt
+
+; @pt p5: pip(0xcdcdc0)
+; @pt p6: vmcs(0xcdcdc000)
+; @pt p7: tip.pgd(3: %l2)
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l1.2
+;%0p5 pip cdcdc0 cr3 0000000000cdcdc0
+;%0p6 vmcs cdcdc000 vmcs 00000000cdcdc000
+;%0p7 tip.pgd 3: %?l2
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[disabled]
+;[vmcs, base: 00000000cdcdc000]
+;[paging, cr3: 0000000000cdcdc0]
diff --git a/test/src/fup-tip-eos.ptt b/test/src/fup-tip-eos.ptt
new file mode 100644
index 000000000000..c4e232401307
--- /dev/null
+++ b/test/src/fup-tip-eos.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we indicate the end of the trace without a TIP.PGD.
+;
+; Variant: the trace ends after an asynchronous branch
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: hlt
+
+; @pt p4:fup(1: %l1)
+; @pt p5:tip(1: %l2)
+l2: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[interrupt]
+;[end of trace]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %0l0
+;%0p3 psbend
+;%0p4 fup 1: %?l1.2
+;%0p5 tip 1: %?l2.2
diff --git a/test/src/fup-tip-fup-tip_pgd.ptt b/test/src/fup-tip-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..93ccfb21f0a9
--- /dev/null
+++ b/test/src/fup-tip-fup-tip_pgd.ptt
@@ -0,0 +1,67 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that FUP + TIP.PGD disables tracing as part of the asynchronous
+; branch.
+;
+; Variant: Tracing is disabled after an interrupt before executing the first
+; instruction of the interrupt handler (e.g. due to another interrupt).
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+l2: hlt
+
+; @pt p5: fup(1: %l2)
+; @pt p6: tip(3: %l3)
+l3: hlt
+l4: hlt
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[interrupt]
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip 3: %?l3
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
diff --git a/test/src/fup-tip.ptt b/test/src/fup-tip.ptt
new file mode 100644
index 000000000000..64ce04de0307
--- /dev/null
+++ b/test/src/fup-tip.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a combination of FUP + TIP representing an asynchronous branch such
+; as an interrupt with both ring-0 and ring-3 traced.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0:psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2:psbend()
+; @pt p3:tip.pge(3: %l0)
+l0: nop
+l1: nop
+
+; The fup contains the IP of the instruction after the last one that was
+; executed.
+; @pt p4:fup(3: %l2)
+l2: nop
+
+; @pt p5:tip(3: %l3)
+l3: nop
+
+; @pt p6:fup(3: %l4)
+; @pt p7:tip.pgd(0: 0)
+l4: nop
+
+
+; @pt .exp(ptxed)
+;[enabled]
+;%0l0
+;%0l1
+;[interrupt]
+;%0l3
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 tip.pge 3: %0l0
+;%0p4 fup 3: %0l2
+;%0p5 tip 3: %0l3
+;%0p6 fup 3: %0l4
+;%0p7 tip.pgd 0: ????????????????
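
The expected ptxed output above is instruction-flow decode: the decoder follows the traced code in a memory image and reports where tracing was enabled, interrupted, and disabled. A rough sketch of such a loop follows; the file name "code.bin", the load address, and the sizes are placeholders, and the code is assumed to be mapped at the test's org address:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    #include "intel-pt.h"

    /* Reconstruct the instruction flow of a trace buffer; buf/size are
     * placeholders for the encoded trace, code_size for the code image.
     */
    static int decode_flow(uint8_t *buf, size_t size, uint64_t code_size)
    {
        struct pt_insn_decoder *decoder;
        struct pt_config config;
        int status;

        memset(&config, 0, sizeof(config));
        config.size = sizeof(config);
        config.begin = buf;
        config.end = buf + size;

        decoder = pt_insn_alloc_decoder(&config);
        if (!decoder)
            return -pte_nomem;

        /* Provide the memory image the decoder reads instructions from. */
        status = pt_image_add_file(pt_insn_get_image(decoder), "code.bin",
                                   0ull, code_size, NULL, 0x1000ull);
        if (status < 0)
            goto out;

        status = pt_insn_sync_forward(decoder);
        while (status >= 0) {
            struct pt_insn insn;

            status = pt_insn_next(decoder, &insn, sizeof(insn));
            if (status < 0)
                break;

            printf("%016" PRIx64 "\n", insn.ip);
        }

    out:
        pt_insn_free_decoder(decoder);
        return status;
    }
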
diff --git a/test/src/fup-tip_pgd-stop.ptt b/test/src/fup-tip_pgd-stop.ptt
new file mode 100644
index 000000000000..ec9b67151470
--- /dev/null
+++ b/test/src/fup-tip_pgd-stop.ptt
@@ -0,0 +1,60 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD.
+;
+; Variant: encountered during normal tracing.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l1)
+; @pt p4: psbend()
+l1: nop
+
+
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(0: %l3)
+; @pt p7: stop()
+l2: nop
+l3: hlt
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[disabled]
+;[stopped]
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %0l1
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 0: %?l3.0
+;%0p7 stop
diff --git a/test/src/fup-tip_pgd-tip_pge.ptt b/test/src/fup-tip_pgd-tip_pge.ptt
new file mode 100644
index 000000000000..fd37b6513823
--- /dev/null
+++ b/test/src/fup-tip_pgd-tip_pge.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a combination of enable and async disable on the same IP.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l1)
+; @pt p4: psbend()
+l1: nop
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(0: %l1)
+; @pt p7: tip.pge(3: %l1)
+l2: nop
+; @pt p8: fup(1: %l2)
+; @pt p9: tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %0l1
+;%0p4 psbend
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 0: %?l1.0
+;%0p7 tip.pge 3: %0l1
+;%0p8 fup 1: %?l2.2
+;%0p9 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[disabled]
+;[resumed]
+;%0l1 # nop
+;[disabled]
diff --git a/test/src/fup-tip_pgd-tip_pge_other_ip.ptt b/test/src/fup-tip_pgd-tip_pge_other_ip.ptt
new file mode 100644
index 000000000000..8a6390e45f15
--- /dev/null
+++ b/test/src/fup-tip_pgd-tip_pge_other_ip.ptt
@@ -0,0 +1,66 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a combination of async disable and enable on a different IP.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l1)
+; @pt p4: psbend()
+l1: nop
+l2: hlt
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(0: %l2)
+l3: nop
+; @pt p7: tip.pge(3: %l3)
+l4: nop
+; @pt p8: fup(1: %l4)
+; @pt p9: tip.pgd(0: %l5)
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %0l1
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 0: %?l2.0
+;%0p7 tip.pge 3: %0l3
+;%0p8 fup 1: %?l4.2
+;%0p9 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[disabled]
+;[enabled]
+;%0l3 # nop
+;[disabled]
diff --git a/test/src/fup-tip_pgd.ptt b/test/src/fup-tip_pgd.ptt
new file mode 100644
index 000000000000..91933e83a44c
--- /dev/null
+++ b/test/src/fup-tip_pgd.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that FUP + TIP.PGD disables tracing as part of the asynchronous
+; branch.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+l2: nop
+l3: nop
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(3: %l3)
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 3: %?l3
diff --git a/test/src/fup-tip_pgd_noip.ptt b/test/src/fup-tip_pgd_noip.ptt
new file mode 100644
index 000000000000..0139b96a9cb0
--- /dev/null
+++ b/test/src/fup-tip_pgd_noip.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that FUP + TIP.PGD disables tracing as part of the asynchronous
+; branch (with suppressed TIP.PGD payload in this case).
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+l2: nop
+l3: nop
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(0: %l3)
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 0: %?l3.0
diff --git a/test/src/int-iret-cpl_0.ptt b/test/src/int-iret-cpl_0.ptt
new file mode 100644
index 000000000000..4449d74531ac
--- /dev/null
+++ b/test/src/int-iret-cpl_0.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that an INT followed by an IRET is decoded correctly.
+;
+; Variant: cpl 3 filtered out
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+; @pt p4: tip.pge(3: %l5)
+
+l1: int 42
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: iret
+l7: hlt
+
+; @pt p5: tip.pgd(0: %l2)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 tip.pge 3: %0l5
+;%0p5 tip.pgd 0: %?l2.0
+
+; @pt .exp(ptxed)
+;[enabled]
+;%0l5 # nop
+;%0l6 # iret
+;[disabled]
diff --git a/test/src/int-iret-cpl_3.ptt b/test/src/int-iret-cpl_3.ptt
new file mode 100644
index 000000000000..ab7202b4f3e0
--- /dev/null
+++ b/test/src/int-iret-cpl_3.ptt
@@ -0,0 +1,94 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that an INT followed by an IRET is decoded correctly.
+;
+; Variant: cpl 0 filtered out
+;
+;
+; Software interrupts receive FUP(CLIP) + TIP(BLIP?)
+;
+; We cannot reliably determine whether the FUP/TIP belongs to the
+; software interrupt or to an asynchronous interrupt that was taken
+; before the instruction.
+;
+; To distinguish the two cases we would need to read ahead but that
+; may require decoding an unknown amount of code (e.g. in different
+; processes for system-wide ring-3 tracing) until we return either
+; to CLIP if it was an asynchronous interrupt or to NLIP (or even to
+; a later IP) if it wasn't.
+;
+; Instead, we assume that it was an asynchronous interrupt. Tracing
+; appears to get disabled before and re-enabled after the instruction.
+;
+; This is wrong most of the time. But it is predictably wrong and it
+; avoids the case where we incorrectly assume a synchronous transfer
+; and get out of sync.
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: fup(3: %l0)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(0: %l5)
+
+l1: int 42
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: iret
+l7: hlt
+
+; @pt p7: tip.pge(3: %l2)
+
+; @pt p8: fup(1: %l3)
+; @pt p9: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l0
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 0: %?l5.0
+;%0p7 tip.pge 3: %?l2
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l0 # nop - missing: l1: int 42
+;[disabled]
+;[enabled]
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/int-iret.ptt b/test/src/int-iret.ptt
new file mode 100644
index 000000000000..1702bfdeb1f9
--- /dev/null
+++ b/test/src/int-iret.ptt
@@ -0,0 +1,96 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that an INT followed by an IRET is decoded correctly.
+;
+; Variant: no cpl filtering
+;
+;
+; Software interrupts receive FUP(CLIP) + TIP(BLIP?)
+;
+; We cannot reliably determine whether the FUP/TIP belongs to the
+; software interrupt or to an asynchronous interrupt that was taken
+; before the instruction.
+;
+; To distinguish the two cases we would need to read ahead but that
+; may require decoding an unknown amount of code (in the kernel or
+; hypervisor or even in different processes) until we return either
+; to CLIP if it was an asynchronous interrupt or to NLIP (or even to
+; a later IP) if it wasn't.
+;
+; Instead, we assume that it was an asynchronous interrupt. Control
+; appears to flow from before the software interrupt instruction to
+; the interrupt handler and back after the instruction.
+;
+; This is wrong most of the time. But it is predictably wrong and it
+; avoids the case where we incorrectly assume a synchronous transfer
+; and get out of sync.
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: fup(3: %l0)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l0: nop
+
+; @pt p5: fup(1: %l1)
+; @pt p6: tip(3: %l5)
+
+l1: int 42
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: iret
+l7: hlt
+
+; @pt p7: tip(3: %l2)
+
+; @pt p8: fup(1: %l3)
+; @pt p9: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l0
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l1.2
+;%0p6 tip 3: %0l5
+;%0p7 tip 3: %0l2
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l0 # nop
+;[interrupt] # this should really be: l1: int 42
+;%0l5 # nop
+;%0l6 # iret
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/linear-fup-tip_pgd.ptt b/test/src/linear-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..812edde60bd1
--- /dev/null
+++ b/test/src/linear-fup-tip_pgd.ptt
@@ -0,0 +1,59 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a rather long linear trace. To keep the test file small, we only check
+; the number of instructions.
+;
+; opt:ptxed --quiet --stat --stat:insn
+;
+; Variant: linear trace ends with disabled event.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l0)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l0: times 100000 nop
+
+l1: hlt
+; @pt p5: fup(2: %l1)
+; @pt p6: tip.pgd(0: %l1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %?l0
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 2: %?l1.4
+;%0p6 tip.pgd 0: %?l1.0
+
+
+; @pt .exp(ptxed)
+;insn: 100000.
diff --git a/test/src/linear-tip.ptt b/test/src/linear-tip.ptt
new file mode 100644
index 000000000000..4090eab9ad4a
--- /dev/null
+++ b/test/src/linear-tip.ptt
@@ -0,0 +1,65 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a rather long linear trace. To keep the test file small, we only check
+; the number of instructions.
+;
+; opt:ptxed --quiet --stat --stat:insn
+;
+; Variant: linear trace ends with an indirect branch.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l0)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l0: times 50000 nop
+
+; @pt p5: tip(2: %l0)
+; @pt p6: tip(2: %l2)
+l1: jmp rax
+
+; @pt p7: fup(1: %l2)
+; @pt p8: tip.pgd(0: %l2)
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %?l0
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip 2: %?l0.4
+;%0p6 tip 2: %?l2.4
+;%0p7 fup 1: %?l2.2
+;%0p8 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;insn: 100002.
diff --git a/test/src/loop-tnt-64.ptt b/test/src/loop-tnt-64.ptt
new file mode 100644
index 000000000000..1fd1a37e3365
--- /dev/null
+++ b/test/src/loop-tnt-64.ptt
@@ -0,0 +1,193 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a big for loop
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: mov rax, 0x0
+l2: jmp l4
+l3: add rax, 0x1
+l4: cmp rax, 0x2a
+l5: jle l3
+; @pt p5: tnt64(tttttttttttttttttttttttttttttttttttttttttttn)
+; @pt p6: fup(3: %l6)
+; @pt p7: tip.pgd(0: 0)
+l6: leave
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.64 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!.
+;%0p6 fup 3: %0l6
+;%0p7 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # mov rax, 0x0
+;%0l2 # jmp l4
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x2a
+;%0l5 # jle l3
+;[disabled]
diff --git a/test/src/loop-tnt-tnt.ptt b/test/src/loop-tnt-tnt.ptt
new file mode 100644
index 000000000000..d7af873f37da
--- /dev/null
+++ b/test/src/loop-tnt-tnt.ptt
@@ -0,0 +1,90 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a simple for loop using two TNT packets.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: mov rax, 0x0
+l2: jmp l4
+l3: add rax, 0x1
+l4: cmp rax, 0x7
+l5: jle l3
+; @pt p5: tnt(t.t.t.t.t.t)
+; @pt p6: tnt(t.t.n)
+; @pt p7: fup(3: %l6)
+; @pt p8: tip.pgd(0: 0)
+l6: leave
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !!!!!!
+;%0p6 tnt.8 !!.
+;%0p7 fup 3: %0l6
+;%0p8 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # mov rax, 0x0
+;%0l2 # jmp l4
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x7
+;%0l5 # jle l3
+;[disabled]
diff --git a/test/src/loop-tnt.ptt b/test/src/loop-tnt.ptt
new file mode 100644
index 000000000000..412a239a008a
--- /dev/null
+++ b/test/src/loop-tnt.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a simple for loop
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: mov rax, 0x0
+l2: jmp l4
+l3: add rax, 0x1
+l4: cmp rax, 0x1
+l5: jle l3
+; @pt p5: tnt(t.t.n)
+; @pt p6: fup(3: %l6)
+; @pt p7: tip.pgd(0: 0)
+l6: leave
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !!.
+;%0p6 fup 3: %0l6
+;%0p7 tip.pgd 0: ????????????????
+
+
+; @pt .exp(ptxed)
+;%0l1 # mov rax, 0x0
+;%0l2 # jmp l4
+;%0l4 # cmp rax, 0x1
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x1
+;%0l5 # jle l3
+;%0l3 # add rax, 0x1
+;%0l4 # cmp rax, 0x1
+;%0l5 # jle l3
+;[disabled]
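
For reference, a minimal C sketch (not part of the imported files) of how a libipt query-decoder client would consume the conditional-branch bits that loop-tnt.ptt above encodes; the decoder is assumed to already be configured for this trace.

/* Minimal sketch (not part of the imported files): draining the TNT bits of
 * loop-tnt.ptt with the libipt query decoder.  Each pt_qry_cond_branch() call
 * returns one conditional-branch bit: taken, taken, not-taken for tnt(t.t.n),
 * i.e. two executed iterations of the jle loop followed by the exit. */
#include <stdio.h>
#include <intel-pt.h>

static void drain_tnt(struct pt_query_decoder *decoder)
{
	uint64_t ip;
	int status;

	/* Synchronize onto the PSB and obtain the start IP (%l1). */
	status = pt_qry_sync_forward(decoder, &ip);
	if (status < 0)
		return;

	for (;;) {
		int taken;

		status = pt_qry_cond_branch(decoder, &taken);
		if (status < 0)
			break;

		printf("jle at l5: %s\n", taken ? "taken" : "not taken");
	}
}
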
diff --git a/test/src/mode_exec-tip.ptt b/test/src/mode_exec-tip.ptt
new file mode 100644
index 000000000000..ab9523bf2f28
--- /dev/null
+++ b/test/src/mode_exec-tip.ptt
@@ -0,0 +1,67 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a MODE.EXEC binding to a TIP.
+;
+; The block decoder used to fail on this as it was not able to reach the
+; MODE.EXEC event IP over the far branch that caused the TIP.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(32bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: jmp far [rax]
+l1: hlt
+
+; @pt p4: mode.exec(64bit)
+; @pt p5: tip(3: %l2)
+l2: nop
+
+; @pt p6:fup(1: %l3)
+; @pt p7:tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.d
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 mode.exec cs.l
+;%0p5 tip 3: %?l2
+;%0p6 fup 1: %?l3.2
+;%0p7 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # jmp far [rax]
+;[exec mode: 64-bit]
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/mtc-cyc_calibrate.ptt b/test/src/mtc-cyc_calibrate.ptt
new file mode 100644
index 000000000000..d788a9941964
--- /dev/null
+++ b/test/src/mtc-cyc_calibrate.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based timing.
+;
+; Variant: MTC-based calibration
+;
+; opt:ptdump --time --time-delta --no-wall-clock
+; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: mtc(0x2)
+; @pt p4: cyc(0x100)
+; @pt p5: mtc(0x3)
+; @pt p6: cyc(0x100)
+; @pt p7: mtc(0x4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 mtc 2 tsc +0
+;[%p4: calibration error: no timing information]
+;[%p4: error updating time: no calibration]
+;%0p4 cyc 100 tsc +0
+;%0p5 mtc 3 tsc +40
+;%0p6 cyc 100 tsc +40
+;%0p7 mtc 4 tsc +0
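
For reference, a minimal C sketch (not part of the imported files) of the calibration arithmetic behind the deltas expected above, assuming the ptdump options from the test header.

/* Minimal sketch (not part of the imported files): the arithmetic behind the
 * deltas expected above, assuming --mtc-freq 4 --cpuid-0x15.eax 1
 * --cpuid-0x15.ebx 4.  The first CYC cannot be converted yet (hence the
 * calibration errors); the MTC pair p3/p5 then yields the cycles-to-TSC
 * ratio that is applied to the second CYC. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t ctc_per_mtc = UINT64_C(1) << 4;           /* --mtc-freq 4 */
	const uint64_t tsc_per_ctc = 4 / 1;                      /* ebx / eax */
	const uint64_t tsc_per_mtc = ctc_per_mtc * tsc_per_ctc;  /* 0x40 */
	const uint64_t cyc_per_mtc = 0x100;                      /* p4: cyc(0x100) */

	/* p5: mtc 3 is one MTC period after mtc 2: tsc +40. */
	printf("mtc 3:   tsc +%llx\n", (unsigned long long)tsc_per_mtc);

	/* p6: 0x100 cycles at 0x40 TSC ticks per 0x100 cycles: tsc +40. */
	uint64_t tsc_cyc = 0x100 * tsc_per_mtc / cyc_per_mtc;
	printf("cyc 100: tsc +%llx\n", (unsigned long long)tsc_cyc);

	/* p7: mtc 4 is one MTC period after mtc 3, but the CYC in between
	 * already accounted for those ticks: tsc +0. */
	printf("mtc 4:   tsc +%llx\n", (unsigned long long)(tsc_per_mtc - tsc_cyc));

	return 0;
}
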
diff --git a/test/src/mtc.ptt b/test/src/mtc.ptt
new file mode 100644
index 000000000000..d439346bf12a
--- /dev/null
+++ b/test/src/mtc.ptt
@@ -0,0 +1,50 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based timing.
+;
+; Variant: No calibration needed.
+;
+; opt:ptdump --time --time-delta --no-tcal --no-wall-clock
+; opt:ptdump --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: mtc(0xc1)
+; @pt p4: mtc(0xc2)
+; @pt p5: mtc(0xc4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 mtc c1 tsc +0
+;%0p4 mtc c2 tsc +400
+;%0p5 mtc c4 tsc +800
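
For reference, a minimal C sketch (not part of the imported files) of the MTC-to-TSC arithmetic behind the "tsc +400" and "tsc +800" deltas expected above, assuming the ptdump options from the test header.

/* Minimal sketch (not part of the imported files): assuming --mtc-freq 8
 * --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4, one MTC payload step corresponds to
 * 2^mtc_freq CTC ticks, converted to TSC with the ebx/eax ratio. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	const unsigned mtc_freq = 8;          /* one MTC step = 2^mtc_freq CTC ticks */
	const uint64_t eax = 1, ebx = 4;      /* TSC/CTC ratio = ebx / eax */
	const uint8_t mtc[] = { 0xc1, 0xc2, 0xc4 };

	for (size_t i = 1; i < sizeof(mtc); ++i) {
		uint64_t ctc = (uint64_t)((uint8_t)(mtc[i] - mtc[i - 1])) << mtc_freq;
		uint64_t tsc = ctc * ebx / eax;

		/* prints "tsc +400" for c1->c2 and "tsc +800" for c2->c4 */
		printf("mtc %02x: tsc +%" PRIx64 "\n", mtc[i], tsc);
	}

	return 0;
}
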
diff --git a/test/src/mwait-pwre-exstop_ip-fup-ovf.ptt b/test/src/mwait-pwre-exstop_ip-fup-ovf.ptt
new file mode 100644
index 000000000000..aab3496de08e
--- /dev/null
+++ b/test/src/mwait-pwre-exstop_ip-fup-ovf.ptt
@@ -0,0 +1,64 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test overflow during a power event session.
+;
+; Variant: no power events are discarded
+;
+; Whereas mwait and exstop bind to fup, pwre is stand-alone, so
+; the mwait event seems to overtake the pwre event.
+;
+; opt:ptxed --event:ip
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: psbend()
+l0: nop
+
+; @pt p2: mwait(5, 0)
+; @pt p3: pwre(c1.0, hw)
+; @pt p4: exstop(ip)
+; @pt p5: fup(3: %l0)
+; @pt p6: ovf()
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 psbend
+;%0p2 mwait 00000005, 00000000
+;%0p3 pwre c1.0, hw
+;%0p4 exstop ip
+;%0p5 fup 3: %?l0
+;%0p6 ovf
+
+; @pt .exp(ptxed)
+;[pwre c1.0 hw]
+;[mwait 5 0, ip: %0l0]
+;[exstop, ip: %0l0]
+;[overflow]
diff --git a/test/src/mwait-pwre-exstop_ip-ovf.ptt b/test/src/mwait-pwre-exstop_ip-ovf.ptt
new file mode 100644
index 000000000000..8ec0138c5371
--- /dev/null
+++ b/test/src/mwait-pwre-exstop_ip-ovf.ptt
@@ -0,0 +1,57 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test overflow during a power event session.
+;
+; Variant: mwait and exstop are incomplete and hence discarded.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: psbend()
+
+; @pt p2: mwait(7, 0)
+; @pt p3: pwre(c1.0, hw)
+; @pt p4: exstop(ip)
+; @pt p5: ovf()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 psbend
+;%0p2 mwait 00000007, 00000000
+;%0p3 pwre c1.0, hw
+;%0p4 exstop ip
+;%0p5 ovf
+
+; @pt .exp(ptxed)
+;[pwre c1.0 hw]
+;[overflow]
diff --git a/test/src/mwait-pwre-exstop_ip-pwrx.ptt b/test/src/mwait-pwre-exstop_ip-pwrx.ptt
new file mode 100644
index 000000000000..e94efec9d2bc
--- /dev/null
+++ b/test/src/mwait-pwre-exstop_ip-pwrx.ptt
@@ -0,0 +1,67 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a power event session.
+;
+; Variant: no branch tracing
+;
+; Whereas mwait and exstop bind to fup, pwre is stand-alone, so
+; the mwait event seems to overtake the pwre event.
+;
+; opt:ptxed --event:ip
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: mwait(7, 1)
+; @pt p4: pwre(c1.0)
+; @pt p5: exstop(ip)
+; @pt p6: fup(3: %l0)
+l0: nop
+
+; @pt p7: pwrx(int: c1, c0)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 mwait 00000007, 00000001
+;%0p4 pwre c1.0
+;%0p5 exstop ip
+;%0p6 fup 3: %?l0
+;%0p7 pwrx int: c1, c0
+
+; @pt .exp(ptxed)
+;[pwre c1.0]
+;[mwait 7 1, ip: %0l0]
+;[exstop, ip: %0l0]
+;[pwrx int: c1 (c0)]
diff --git a/test/src/ovf-fup.ptt b/test/src/ovf-fup.ptt
new file mode 100644
index 000000000000..3b66313825c6
--- /dev/null
+++ b/test/src/ovf-fup.ptt
@@ -0,0 +1,64 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing remains enabled
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: fup(3: %l1)
+l1: nop
+
+; @pt p6: fup(1: %l2)
+; @pt p7: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 fup 3: %?l1
+;%0p6 fup 1: %?l2.2
+;%0p7 tip.pgd 0: %?l3.0
diff --git a/test/src/ovf-mnt-fup.ptt b/test/src/ovf-mnt-fup.ptt
new file mode 100644
index 000000000000..547a4f4fd96a
--- /dev/null
+++ b/test/src/ovf-mnt-fup.ptt
@@ -0,0 +1,69 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing remains enabled, mnt in-between
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: pad()
+; @pt p6: mnt(0xa000)
+; @pt p7: fup(3: %l1)
+l1: nop
+
+; @pt p8: fup(1: %l2)
+; @pt p9: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[mnt: a000]
+;[overflow]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 pad
+;%0p6 mnt a000
+;%0p7 fup 3: %?l1
+;%0p8 fup 1: %?l2.2
+;%0p9 tip.pgd 0: %?l3.0
diff --git a/test/src/ovf-mnt-tip_pge.ptt b/test/src/ovf-mnt-tip_pge.ptt
new file mode 100644
index 000000000000..588ceeeced8c
--- /dev/null
+++ b/test/src/ovf-mnt-tip_pge.ptt
@@ -0,0 +1,73 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing disabled, mnt in-between
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(32bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: mnt(0xa000)
+; @pt p6: pad()
+; @pt p7: mode.exec(64bit)
+; @pt p8: tip.pge(3: %l1)
+l1: nop
+
+; @pt p9: fup(1: %l2)
+; @pt p10: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[mnt: a000]
+;[enabled]
+;[exec mode: 64-bit]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.d
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 mnt a000
+;%0p6 pad
+;%0p7 mode.exec cs.l
+;%0p8 tip.pge 3: %?l1
+;%0p9 fup 1: %?l2.2
+;%0p10 tip.pgd 0: %?l3.0
diff --git a/test/src/ovf-pwre-pwrx-tip_pge.ptt b/test/src/ovf-pwre-pwrx-tip_pge.ptt
new file mode 100644
index 000000000000..2335a1f0a9f2
--- /dev/null
+++ b/test/src/ovf-pwre-pwrx-tip_pge.ptt
@@ -0,0 +1,67 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test overflow that resolves during a power event session.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: ovf()
+
+; @pt p4: pwre(c1.0, hw)
+; @pt p5: pwrx(int: c1, c1)
+
+; @pt p6: tip.pge(3: %l0)
+l0: nop
+
+; @pt p7: fup(1: %l1)
+; @pt p8: tip.pgd(0: %l2)
+l1: nop
+l2: hlt
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 ovf
+;%0p4 pwre c1.0, hw
+;%0p5 pwrx int: c1, c1
+;%0p6 tip.pge 3: %?l0
+;%0p7 fup 1: %?l1.2
+;%0p8 tip.pgd 0: %?l2.0
+
+; @pt .exp(ptxed)
+;[overflow]
+;[pwre c1.0 hw]
+;[pwrx int: c1 (c1)]
+;[enabled]
+;%0l0
+;[disabled]
diff --git a/test/src/ovf-timing-fup.ptt b/test/src/ovf-timing-fup.ptt
new file mode 100644
index 000000000000..d354921bc76f
--- /dev/null
+++ b/test/src/ovf-timing-fup.ptt
@@ -0,0 +1,77 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing remains enabled, timing packets in-between
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: tsc(0xa00)
+; @pt p6: cbr(0xf)
+; @pt p7: tma(0xa, 0xc)
+; @pt p8: pad()
+; @pt p9: mtc(0x10)
+; @pt p10: cyc(0xa)
+; @pt p11: fup(3: %l1)
+l1: nop
+
+; @pt p12: fup(1: %l2)
+; @pt p13: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[cbr: f]
+;[overflow]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 tsc a00
+;%0p6 cbr f
+;%0p7 tma a, c
+;%0p8 pad
+;%0p9 mtc 10
+;%0p10 cyc a
+;%0p11 fup 3: %?l1
+;%0p12 fup 1: %?l2.2
+;%0p13 tip.pgd 0: %?l3.0
diff --git a/test/src/ovf-timing-tip_pge.ptt b/test/src/ovf-timing-tip_pge.ptt
new file mode 100644
index 000000000000..0e5504605483
--- /dev/null
+++ b/test/src/ovf-timing-tip_pge.ptt
@@ -0,0 +1,81 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing disabled, timing packets in-between
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(32bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: tsc(0xa00)
+; @pt p6: cbr(0xf)
+; @pt p7: tma(0xa, 0xc)
+; @pt p8: pad()
+; @pt p9: mtc(0x10)
+; @pt p10: cyc(0xa)
+; @pt p11: mode.exec(64bit)
+; @pt p12: tip.pge(3: %l1)
+l1: nop
+
+; @pt p13: fup(1: %l2)
+; @pt p14: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[cbr: f]
+;[enabled]
+;[exec mode: 64-bit]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.d
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 tsc a00
+;%0p6 cbr f
+;%0p7 tma a, c
+;%0p8 pad
+;%0p9 mtc 10
+;%0p10 cyc a
+;%0p11 mode.exec cs.l
+;%0p12 tip.pge 3: %?l1
+;%0p13 fup 1: %?l2.2
+;%0p14 tip.pgd 0: %?l3.0
diff --git a/test/src/ovf-tip_pge.ptt b/test/src/ovf-tip_pge.ptt
new file mode 100644
index 000000000000..22c91641c852
--- /dev/null
+++ b/test/src/ovf-tip_pge.ptt
@@ -0,0 +1,68 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing disabled
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(32bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; @pt p5: mode.exec(64bit)
+; @pt p6: tip.pge(3: %l1)
+l1: nop
+
+; @pt p7: fup(1: %l2)
+; @pt p8: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[enabled]
+;[exec mode: 64-bit]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.d
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 mode.exec cs.l
+;%0p6 tip.pge 3: %?l1
+;%0p7 fup 1: %?l2.2
+;%0p8 tip.pgd 0: %?l3.0
diff --git a/test/src/ovf.ptt b/test/src/ovf.ptt
new file mode 100644
index 000000000000..8c421627fb5d
--- /dev/null
+++ b/test/src/ovf.ptt
@@ -0,0 +1,50 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing disabled throughout
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: psbend()
+
+; @pt p2: ovf()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 psbend
+;%0p2 ovf
+
+; @pt .exp(ptxed)
+;[overflow]
diff --git a/test/src/pip-far_call.ptt b/test/src/pip-far_call.ptt
new file mode 100644
index 000000000000..c389e42b43bf
--- /dev/null
+++ b/test/src/pip-far_call.ptt
@@ -0,0 +1,68 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that PIP binds to a far branch
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l1)
+; @pt p4: psbend()
+l1: nop
+
+; @pt p5: pip(0xcdcdc0)
+; @pt p6: tip(3: %l4)
+l2: call far [rax] ; l4
+l3: hlt
+
+l4: nop
+
+; @pt p7: fup(1: %l5)
+; @pt p8: tip.pgd(0: %l6)
+l5: nop
+l6: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %?l1
+;%0p4 psbend
+;%0p5 pip cdcdc0 cr3 0000000000cdcdc0
+;%0p6 tip 3: %?l4
+;%0p7 fup 1: %?l5.2
+;%0p8 tip.pgd 0: %?l6.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;%0l2 # call far [rax] # l4
+;[paging, cr3: 0000000000cdcdc0]
+;%0l4 # nop
+;[disabled]
diff --git a/test/src/pip-pip_mov_cr3-fail.ptt b/test/src/pip-pip_mov_cr3-fail.ptt
new file mode 100644
index 000000000000..5247bf1b0574
--- /dev/null
+++ b/test/src/pip-pip_mov_cr3-fail.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a paging event is bound to the next MOV CR3 instruction.
+;
+; Variant: Only one event binds to each instruction. While searching
+; for the next MOV CR3 to bind the second event, we run out
+; of code.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; @pt p5: pip(0xa000)
+; @pt p6: pip(0xb000)
+l1: mov cr3, rax
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 pip a000 cr3 000000000000a000
+;%0p6 pip b000 cr3 000000000000b000
+
+
+; @pt .exp(ptxed)
+;%0l1 # mov cr3, rax
+;[paging, cr3: 000000000000a000]
+;%0l2 # hlt
+;[%eos, 100004: error: no memory mapped at this address]
diff --git a/test/src/pip-vmcs-tip_pgd.ptt b/test/src/pip-vmcs-tip_pgd.ptt
new file mode 100755
index 000000000000..33f4d6ddedc0
--- /dev/null
+++ b/test/src/pip-vmcs-tip_pgd.ptt
@@ -0,0 +1,60 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a PIP and VMCS binding to a synchronous disable.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+
+; @pt p4: pip(0xcdcdc0)
+; @pt p5: vmcs(0xcdcdc000)
+; @pt p6: tip.pgd(3: %l2)
+l0: vmlaunch
+l1: hlt
+
+l2: nop
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 pip cdcdc0 cr3 0000000000cdcdc0
+;%0p5 vmcs cdcdc000 vmcs 00000000cdcdc000
+;%0p6 tip.pgd 3: %?l2
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[paging, cr3: 0000000000cdcdc0]
+;[vmcs, base: 00000000cdcdc000]
+;[disabled]
diff --git a/test/src/pip_mov_cr3-pip_mov_cr3.ptt b/test/src/pip_mov_cr3-pip_mov_cr3.ptt
new file mode 100644
index 000000000000..fba4cb58adee
--- /dev/null
+++ b/test/src/pip_mov_cr3-pip_mov_cr3.ptt
@@ -0,0 +1,66 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a paging event is bound to the next MOV CR3 instruction.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; @pt p5: pip(0xa000)
+l1: mov cr3, rax
+
+; @pt p6: pip(0xb000)
+l2: mov cr3, rax
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+l3: nop
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 pip a000 cr3 000000000000a000
+;%0p6 pip b000 cr3 000000000000b000
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # mov cr3, rax
+;[paging, cr3: 000000000000a000]
+;%0l2 # mov cr3, rax
+;[paging, cr3: 000000000000b000]
+;[disabled]
diff --git a/test/src/psb-empty.ptt b/test/src/psb-empty.ptt
new file mode 100644
index 000000000000..d9d5660ead8f
--- /dev/null
+++ b/test/src/psb-empty.ptt
@@ -0,0 +1,45 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we do not diagnose an error for an empty trace.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptxed)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
diff --git a/test/src/psb-exstop.ptt b/test/src/psb-exstop.ptt
new file mode 100644
index 000000000000..4471b3feac67
--- /dev/null
+++ b/test/src/psb-exstop.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test an execution stop event.
+;
+; Variant: before the first instruction, tracing starts enabled
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: exstop(ip)
+; @pt p5: fup(1: %l0)
+
+; @pt p6:fup(1: %l1)
+; @pt p7:tip.pgd(0: %l1)
+l1: hlt
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 exstop ip
+;%0p5 fup 1: %?l0.2
+;%0p6 fup 1: %?l1.2
+;%0p7 tip.pgd 0: %?l1.0
+
+; @pt .exp(ptxed)
+;[exstop]
+;%0l0
+;[disabled]
\ No newline at end of file
diff --git a/test/src/psb-fup-psbend.ptt b/test/src/psb-fup-psbend.ptt
new file mode 100644
index 000000000000..fa30e0a58201
--- /dev/null
+++ b/test/src/psb-fup-psbend.ptt
@@ -0,0 +1,53 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we do not diagnose an error for an empty trace.
+;
+; Variant: tracing is enabled
+; we don't know whether the instruction is actually executed
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: 0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: 0000000000000000
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+
+
+; @pt .exp(ptxed)
+;[end of trace]
diff --git a/test/src/psb-fup-tip_pgd-stop.ptt b/test/src/psb-fup-tip_pgd-stop.ptt
new file mode 100644
index 000000000000..ab0a3b558e55
--- /dev/null
+++ b/test/src/psb-fup-tip_pgd-stop.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD.
+;
+; Variant: we sync'ed right at the TIP.PGD.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: psbend()
+l1: nop
+
+; @pt p4: fup(1: %l1)
+; @pt p5: tip.pgd(0: %l2)
+; @pt p6: stop()
+l2: hlt
+
+; @pt .exp(ptxed)
+;[disabled]
+;[stopped]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 psbend
+;%0p4 fup 1: %?l1.2
+;%0p5 tip.pgd 0: %?l2.0
+;%0p6 stop
diff --git a/test/src/psb-fup-tip_pgd.ptt b/test/src/psb-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..a07f42ef25e6
--- /dev/null
+++ b/test/src/psb-fup-tip_pgd.ptt
@@ -0,0 +1,54 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that FUP + TIP.PGD disables tracing as part of the asynchronous
+; branch.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+l2: nop
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(3: %l2)
+
+
+; @pt .exp(ptxed)
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 3: %?l2
diff --git a/test/src/psb-mnt-fup-psbend.ptt b/test/src/psb-mnt-fup-psbend.ptt
new file mode 100644
index 000000000000..f0b9e2454a22
--- /dev/null
+++ b/test/src/psb-mnt-fup-psbend.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that maintenance events are reported.
+;
+; Variant: the mnt appears in PSB+, tracing enabled
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: 0)
+; @pt p2: mnt(0xa000)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: 0000000000000000
+;%0p2 mnt a000
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+
+
+; @pt .exp(ptxed)
+;[mnt: a000]
+;[end of trace]
diff --git a/test/src/psb-mnt-psbend.ptt b/test/src/psb-mnt-psbend.ptt
new file mode 100644
index 000000000000..d1a1c11d0175
--- /dev/null
+++ b/test/src/psb-mnt-psbend.ptt
@@ -0,0 +1,50 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that maintenance events are reported.
+;
+; Variant: the mnt appears in PSB+, tracing disabled
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mnt(0xa000)
+; @pt p2: psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mnt a000
+;%0p2 psbend
+
+
+; @pt .exp(ptxed)
+;[mnt: a000]
diff --git a/test/src/psb-ovf-fup.ptt b/test/src/psb-ovf-fup.ptt
new file mode 100644
index 000000000000..50eb2f7e8482
--- /dev/null
+++ b/test/src/psb-ovf-fup.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing remains enabled, overflow during PSB+
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: ovf()
+; @pt p4: fup(3: %l1)
+l0: hlt
+l1: nop
+
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 ovf
+;%0p4 fup 3: %?l1
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 0: %?l3.0
diff --git a/test/src/psb-ovf-tip_pge.ptt b/test/src/psb-ovf-tip_pge.ptt
new file mode 100644
index 000000000000..042fe2643475
--- /dev/null
+++ b/test/src/psb-ovf-tip_pge.ptt
@@ -0,0 +1,66 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test packet generation overflow
+;
+; Variant: tracing disabled, overflow during PSB+
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(32bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: ovf()
+l0: hlt
+
+; @pt p4: mode.exec(64bit)
+; @pt p5: tip.pge(3: %l1)
+l1: nop
+
+; @pt p6: fup(1: %l2)
+; @pt p7: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[enabled]
+;[exec mode: 64-bit]
+;%0l1
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.d
+;%0p2 fup 3: %?l0
+;%0p3 ovf
+;%0p4 mode.exec cs.l
+;%0p5 tip.pge 3: %?l1
+;%0p6 fup 1: %?l2.2
+;%0p7 tip.pgd 0: %?l3.0
diff --git a/test/src/psb-pip-psb.ptt b/test/src/psb-pip-psb.ptt
new file mode 100644
index 000000000000..0334ceddce11
--- /dev/null
+++ b/test/src/psb-pip-psb.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a PIP is processed while tracing is disabled.
+;
+; Variant: not enabled between two syncpoints.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: pip(0xa00)
+
+; @pt p4: psb()
+; @pt p5: psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptxed)
+;[paging, cr3: 0000000000000a00]
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 pip a00 cr3 0000000000000a00
+;%0p4 psb
+;%0p5 psbend
diff --git a/test/src/psb-pip-tip_pge.ptt b/test/src/psb-pip-tip_pge.ptt
new file mode 100644
index 000000000000..c9b567bdfaac
--- /dev/null
+++ b/test/src/psb-pip-tip_pge.ptt
@@ -0,0 +1,62 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a PIP is processed while tracing is disabled.
+;
+; Variant: not enabled at syncpoint.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+; @pt p4: pip(0xa00)
+; @pt p5: tip.pge(3: %l1)
+l1: nop
+
+l2: nop
+l3: hlt
+; @pt p6: fup(1: %l2)
+; @pt p7: tip.pgd(0: %l3)
+
+
+; @pt .exp(ptxed)
+;[paging, cr3: 0000000000000a00]
+;[enabled]
+;%0l1 # nop
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 pip a00 cr3 0000000000000a00
+;%0p5 tip.pge 3: %0l1
+;%0p6 fup 1: %?l2.2
+;%0p7 tip.pgd 0: %?l3.0
diff --git a/test/src/psb-psb.ptt b/test/src/psb-psb.ptt
new file mode 100644
index 000000000000..75940712f41c
--- /dev/null
+++ b/test/src/psb-psb.ptt
@@ -0,0 +1,64 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that linear code between two PSB+ is printed correctly
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+
+; @pt p5: psb()
+; @pt p6: fup(3: %l2)
+; @pt p7: mode.exec(64bit)
+; @pt p8: psbend()
+
+; @pt p9: fup(3: %l2)
+; @pt p10: tip.pgd(0: 0)
+l2: nop
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 psb
+;%0p6 fup 3: %0l2
+;%0p7 mode.exec cs.l
+;%0p8 psbend
+;%0p9 fup 3: %0l2
+;%0p10 tip.pgd 0: ????????????????
diff --git a/test/src/psb-stop.ptt b/test/src/psb-stop.ptt
new file mode 100644
index 000000000000..3901feade79b
--- /dev/null
+++ b/test/src/psb-stop.ptt
@@ -0,0 +1,48 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD.
+;
+; Variant: we just sync'ed.
+;
+
+org 0x100000
+
+; @pt p1: psb()
+; @pt p2: psbend()
+; @pt p3: stop()
+
+; yasm does not like empty files
+nop
+
+; @pt .exp(ptxed)
+;[stopped]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 stop
diff --git a/test/src/psb-tnt-psb.ptt b/test/src/psb-tnt-psb.ptt
new file mode 100644
index 000000000000..60c33974ad52
--- /dev/null
+++ b/test/src/psb-tnt-psb.ptt
@@ -0,0 +1,69 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we keep the tnt cache intact when reading ahead over a PSB+.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jne l3
+; @pt p5: tnt(t)
+l2: hlt
+
+; @pt p6: psb()
+; @pt p7: mode.exec(64bit)
+; @pt p8: fup(3: %l3)
+; @pt p9: psbend()
+l3: nop
+l4: nop
+; @pt p10: fup(1: %l4)
+; @pt p11: tip.pgd(0: %l5)
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !
+;%0p6 psb
+;%0p7 mode.exec cs.l
+;%0p8 fup 3: %0l3
+;%0p9 psbend
+;%0p10 fup 1: %?l4.2
+;%0p11 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # jne l3
+;%0l3 # nop
+;[disabled]
diff --git a/test/src/psb-tsx.ptt b/test/src/psb-tsx.ptt
new file mode 100644
index 000000000000..29d9e2066c42
--- /dev/null
+++ b/test/src/psb-tsx.ptt
@@ -0,0 +1,57 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that MODE.TSX in PSB+ is used to initialize the ptxed state.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: mode.tsx(begin)
+; @pt p5: psbend()
+l1: nop
+
+; @pt p6: fup(3: %l2)
+; @pt p7: tip.pgd(0: 0)
+l2: nop
+
+
+; @pt .exp(ptxed)
+;? %0l1 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 mode.tsx intx
+;%0p5 psbend
+;%0p6 fup 3: %0l2
+;%0p7 tip.pgd 0: ????????????????
diff --git a/test/src/psb-tsx_abort-tip-fup-tip_pgd.ptt b/test/src/psb-tsx_abort-tip-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..638740c469cd
--- /dev/null
+++ b/test/src/psb-tsx_abort-tip-fup-tip_pgd.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX aborts are shown correctly.
+;
+; Variant: the abort happens when executing the first instruction.
+; tracing is disabled after jumping to the abort handler.
+;
+; opt:ptxed --event:ip
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: psbend()
+l0: hlt
+
+; @pt p5: mode.tsx(abort)
+; @pt p6: fup(1: %l0)
+; @pt p7: tip(1: %l1)
+l1: hlt
+
+; @pt p8: fup(1: %l1)
+; @pt p9: tip.pgd(0: %l1)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 psbend
+;%0p5 mode.tsx abrt
+;%0p6 fup 1: %?l0.2
+;%0p7 tip 1: %?l1.2
+;%0p8 fup 1: %?l1.2
+;%0p9 tip.pgd 0: %?l1.0
+
+
+; @pt .exp(ptxed)
+;[aborted, ip: %0l0]
+;[interrupt, from: %0l0, to: %0l1]
+;[disabled, at: %0l1]
diff --git a/test/src/psb-tsx_abort-tip_pgd.ptt b/test/src/psb-tsx_abort-tip_pgd.ptt
new file mode 100644
index 000000000000..9665a633ba20
--- /dev/null
+++ b/test/src/psb-tsx_abort-tip_pgd.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX aborts are shown correctly.
+;
+; Variant: the abort happens when executing the first instruction.
+; the abort disables tracing.
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: psbend()
+l0: hlt
+
+; @pt p5: mode.tsx(abort)
+; @pt p6: fup(1: %l0)
+; @pt p7: tip.pgd(0: %l0)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 psbend
+;%0p5 mode.tsx abrt
+;%0p6 fup 1: %?l0.2
+;%0p7 tip.pgd 0: %?l0.0
+
+
+; @pt .exp(ptxed)
+;[aborted]
+;[disabled]
diff --git a/test/src/psb-tsx_abort.ptt b/test/src/psb-tsx_abort.ptt
new file mode 100644
index 000000000000..16b83f78e54f
--- /dev/null
+++ b/test/src/psb-tsx_abort.ptt
@@ -0,0 +1,69 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX aborts are shown correctly.
+;
+; Variant: the abort happens when executing the first instruction.
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.tsx(begin)
+; @pt p4: psbend()
+l0: hlt
+
+; @pt p5: mode.tsx(abort)
+; @pt p6: fup(1: %l0)
+; @pt p7: tip(1: %l1)
+l1: nop
+
+; @pt p8: fup(1: %l2)
+; @pt p9: tip.pgd(0: %l2)
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 mode.tsx intx
+;%0p4 psbend
+;%0p5 mode.tsx abrt
+;%0p6 fup 1: %?l0.2
+;%0p7 tip 1: %?l1.2
+;%0p8 fup 1: %?l2.2
+;%0p9 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;[aborted]
+;[interrupt]
+;%0l1
+;[disabled]
diff --git a/test/src/psb-vmcs.ptt b/test/src/psb-vmcs.ptt
new file mode 100644
index 000000000000..808b95eb8d40
--- /dev/null
+++ b/test/src/psb-vmcs.ptt
@@ -0,0 +1,46 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we print VMCS correctly
+;
+
+org 0x100000
+
+; @pt p1: psb()
+; @pt p2: vmcs(0xcdcdf000)
+; @pt p3: psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 vmcs cdcdf000 vmcs 00000000cdcdf000
+;%0p3 psbend
+
+
+; @pt .exp(ptxed)
diff --git a/test/src/psb_nofup-psb.ptt b/test/src/psb_nofup-psb.ptt
new file mode 100644
index 000000000000..1d746a434117
--- /dev/null
+++ b/test/src/psb_nofup-psb.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a PSB+ without FUP indicates that tracing is disabled
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+ nop
+
+; @pt p3: psb()
+; @pt p4: fup(3: %l2)
+; @pt p5: mode.exec(64bit)
+; @pt p6: psbend()
+l2: nop
+
+; @pt p7: fup(3: %l3)
+; @pt p8: tip.pgd(0: 0)
+l3: nop
+
+
+; @pt .exp(ptxed)
+;%0l2 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 psb
+;%0p4 fup 3: %0l2
+;%0p5 mode.exec cs.l
+;%0p6 psbend
+;%0p7 fup 3: %0l3
+;%0p8 tip.pgd 0: ????????????????
diff --git a/test/src/ptdump-exec-mode.ptt b/test/src/ptdump-exec-mode.ptt
new file mode 100644
index 000000000000..0c74e4ceb58e
--- /dev/null
+++ b/test/src/ptdump-exec-mode.ptt
@@ -0,0 +1,46 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptdump prints the execution mode correctly.
+;
+; opt:ptdump --exec-mode
+
+org 0x1000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: mode.exec(32bit)
+; @pt p4: mode.exec(16bit)
+; @pt p5: psbend()
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l em 64-bit
+;%0p3 mode.exec cs.d em 32-bit
+;%0p4 mode.exec em 16-bit
+;%0p5 psbend
diff --git a/test/src/ptdump-last-ip.ptt b/test/src/ptdump-last-ip.ptt
new file mode 100644
index 000000000000..d69b6b637b2e
--- /dev/null
+++ b/test/src/ptdump-last-ip.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptdump prints last-ip correctly.
+;
+; opt:ptdump --lastip
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: psbend()
+
+; @pt p2: fup(6: 0x0a00ccccddddeeee)
+; @pt p3: tip(4: 0xffffeeeeffff)
+; @pt p4: tip.pge(1: 0xdddd)
+; @pt p5: fup(3: 0xffffddddeeee)
+; @pt p6: tip.pgd(2: 0xeeeeffff)
+
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 psbend
+;%0p2 fup 6: 0a00ccccddddeeee ip 0a00ccccddddeeee
+;%0p3 tip 4: ????ffffeeeeffff ip 0a00ffffeeeeffff
+;%0p4 tip.pge 1: ????????????dddd ip 0a00ffffeeeedddd
+;%0p5 fup 3: ffffffffddddeeee ip ffffffffddddeeee
+;%0p6 tip.pgd 2: ????????eeeeffff ip ffffffffeeeeffff
diff --git a/test/src/ptdump-no-offset-raw.ptt b/test/src/ptdump-no-offset-raw.ptt
new file mode 100644
index 000000000000..096cf6c127d7
--- /dev/null
+++ b/test/src/ptdump-no-offset-raw.ptt
@@ -0,0 +1,45 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptdump honors --no-offset
+;
+; Variant: the raw packet bytes are printed in the first column.
+;
+; opt:ptdump --no-offset --raw
+
+org 0x1000
+bits 64
+
+; @pt psb()
+; @pt psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;02820282028202820282028202820282 psb
+;0223 psbend
diff --git a/test/src/ptdump-no-offset.ptt b/test/src/ptdump-no-offset.ptt
new file mode 100644
index 000000000000..a1227b22f9c8
--- /dev/null
+++ b/test/src/ptdump-no-offset.ptt
@@ -0,0 +1,45 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptdump honors --no-offset
+;
+; Variant: the packet is printed in the first column.
+;
+; opt:ptdump --no-offset
+
+org 0x1000
+bits 64
+
+; @pt psb()
+; @pt psbend()
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;psb
+;psbend
diff --git a/test/src/ptw-fup.ptt b/test/src/ptw-fup.ptt
new file mode 100644
index 000000000000..dedd248f92d0
--- /dev/null
+++ b/test/src/ptw-fup.ptt
@@ -0,0 +1,59 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test PTW
+;
+; Variant: tracing is disabled, FUP on PTW
+;
+; opt:ptxed --event:ip
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: ptw(0: 0xabcd, ip)
+; @pt p4: fup(3: %l0)
+l0: nop
+
+; @pt p5: ptw(1: 0xef09, ip)
+; @pt p6: fup(3: %l1)
+l1: nop
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 ptw 0: abcd, ip
+;%0p4 fup 3: %?l0
+;%0p5 ptw 1: ef09, ip
+;%0p6 fup 3: %?l1
+
+; @pt .exp(ptxed)
+;[ptwrite: abcd, ip: %0l0]
+;[ptwrite: ef09, ip: %0l1]
diff --git a/test/src/ptw.ptt b/test/src/ptw.ptt
new file mode 100644
index 000000000000..aeaeea1f0145
--- /dev/null
+++ b/test/src/ptw.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test PTW
+;
+; Variant: tracing is disabled, no FUP on PTW
+;
+; opt:ptxed --event:ip
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: ptw(0: 0xabcd)
+; @pt p4: ptw(1: 0xef09)
+
+; yasm does not like empty files
+ nop
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 ptw 0: abcd
+;%0p4 ptw 1: ef09
+
+
+; @pt .exp(ptxed)
+;[ptwrite: abcd]
+;[ptwrite: ef09]
diff --git a/test/src/ptxed-block-stat.ptt b/test/src/ptxed-block-stat.ptt
new file mode 100644
index 000000000000..a5998cde4374
--- /dev/null
+++ b/test/src/ptxed-block-stat.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptxed counts instructions and blocks correctly.
+;
+; opt:ptxed --block-decoder --stat
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: nop
+l2: nop
+l3: nop
+
+; @pt p4: fup(1: %l4)
+; @pt p5: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;%0l1
+;%0l2
+;%0l3
+;[disabled]
+;insn: 4.
+;blocks: 1.
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l4.2
+;%0p5 tip.pgd 0: %?l4.0
diff --git a/test/src/ptxed-block-stat_blocks.ptt b/test/src/ptxed-block-stat_blocks.ptt
new file mode 100644
index 000000000000..e2455b7cce87
--- /dev/null
+++ b/test/src/ptxed-block-stat_blocks.ptt
@@ -0,0 +1,62 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptxed counts instructions and blocks correctly.
+;
+; opt:ptxed --block-decoder --stat --stat:blocks
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: nop
+l2: nop
+l3: nop
+
+; @pt p4: fup(1: %l4)
+; @pt p5: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;%0l1
+;%0l2
+;%0l3
+;[disabled]
+;blocks: 1.
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l4.2
+;%0p5 tip.pgd 0: %?l4.0
diff --git a/test/src/ptxed-end_on_call-fup-tip.ptt b/test/src/ptxed-end_on_call-fup-tip.ptt
new file mode 100644
index 000000000000..c4085d54fb9a
--- /dev/null
+++ b/test/src/ptxed-end_on_call-fup-tip.ptt
@@ -0,0 +1,73 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test the end-on-call block decoder option.
+;
+; Variant: there's an async branch event right at the call destination
+;
+; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: call l3
+l2: hlt
+
+; @pt p4: fup(1: %l3)
+; @pt p5: tip(1: %l5)
+l3: nop
+l4: hlt
+
+; @pt p6: fup(1: %l6)
+; @pt p7: tip.pgd(0: %l6)
+l5: nop
+l6: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l3.2
+;%0p5 tip 1: %?l5.2
+;%0p6 fup 1: %?l6.2
+;%0p7 tip.pgd 0: %?l6.0
+
+
+; @pt .exp(ptxed)
+;[block]
+;%0l0 # nop
+;%0l1 # call l3
+;[interrupt]
+;[block]
+;%0l5 # nop
+;[disabled]
diff --git a/test/src/ptxed-end_on_call-fup-tip_pgd.ptt b/test/src/ptxed-end_on_call-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..0d6e33b97dc2
--- /dev/null
+++ b/test/src/ptxed-end_on_call-fup-tip_pgd.ptt
@@ -0,0 +1,66 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test the end-on-call block decoder option.
+;
+; Variant: there's an async disable event after the call.
+;
+; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: call l3
+l2: hlt
+
+l3: nop
+
+; @pt p4: fup(1: %l4)
+; @pt p5: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l4.2
+;%0p5 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;[block]
+;%0l0 # nop
+;%0l1 # call l3
+;[block]
+;%0l3 # nop
+;[disabled]
diff --git a/test/src/ptxed-end_on_call-ret_tip.ptt b/test/src/ptxed-end_on_call-ret_tip.ptt
new file mode 100644
index 000000000000..d5993c1e8a5a
--- /dev/null
+++ b/test/src/ptxed-end_on_call-ret_tip.ptt
@@ -0,0 +1,82 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test the end-on-call block decoder option.
+;
+; Variant: there's an uncompressed return after the call
+;
+; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: call l4
+l2: nop
+l3: hlt
+
+; we first return to l0 to take another round using the block cache.
+;
+; @pt p4: tip(1: %l0)
+; @pt p5: tip(1: %l2)
+l4: nop
+l5: ret
+
+; @pt p6: fup(1: %l3)
+; @pt p7: tip.pgd(0: %l3)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tip 1: %?l0.2
+;%0p5 tip 1: %?l2.2
+;%0p6 fup 1: %?l3.2
+;%0p7 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[block]
+;%0l0 # nop
+;%0l1 # call l4
+;[block]
+;%0l4 # nop
+;%0l5 # ret
+;[block]
+;%0l0 # nop
+;%0l1 # call l4
+;[block]
+;%0l4 # nop
+;%0l5 # ret
+;[block]
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/ptxed-end_on_call-ret_tnt.ptt b/test/src/ptxed-end_on_call-ret_tnt.ptt
new file mode 100644
index 000000000000..a440a005e67c
--- /dev/null
+++ b/test/src/ptxed-end_on_call-ret_tnt.ptt
@@ -0,0 +1,72 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test the end-on-call block decoder option.
+;
+; Variant: there's a compressed return after the call
+;
+; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: call l4
+l2: nop
+l3: hlt
+
+; @pt p4: tnt(t)
+l4: nop
+l5: ret
+
+; @pt p5: fup(1: %l3)
+; @pt p6: tip.pgd(0: %l3)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tnt.8 !
+;%0p5 fup 1: %?l3.2
+;%0p6 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[block]
+;%0l0 # nop
+;%0l1 # call l4
+;[block]
+;%0l4 # nop
+;%0l5 # ret
+;[block]
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/ptxed-end_on_call-tip_pgd.ptt b/test/src/ptxed-end_on_call-tip_pgd.ptt
new file mode 100644
index 000000000000..66b2b574e6e3
--- /dev/null
+++ b/test/src/ptxed-end_on_call-tip_pgd.ptt
@@ -0,0 +1,65 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test the end-on-call block decoder option.
+;
+; Variant: there's a disable event after the call.
+;
+; opt:ptxed --block-decoder --block:show-blocks --block:end-on-call
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: call l3
+l2: hlt
+
+l3: nop
+l4: ret
+
+; @pt p4: tip.pgd(0: %l2)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;[block]
+;%0l0 # nop
+;%0l1 # call l3
+;[block]
+;%0l3 # nop
+;%0l4 # ret
+;[disabled]
diff --git a/test/src/ptxed-end_on_jump-fup-tip_pgd.ptt b/test/src/ptxed-end_on_jump-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..4aae6e1a3115
--- /dev/null
+++ b/test/src/ptxed-end_on_jump-fup-tip_pgd.ptt
@@ -0,0 +1,65 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test the end-on-jump block decoder option.
+;
+; Variant: there's an async disable event after the jump.
+;
+; opt:ptxed --block-decoder --block:show-blocks --block:end-on-jump
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: jmp l3
+l2: hlt
+
+l3: nop
+
+; @pt p4: fup(1: %l4)
+; @pt p5: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptxed)
+;[block]
+;%0l0 # nop
+;%0l1 # jmp l3
+;[block]
+;%0l3 # nop
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l4.2
+;%0p5 tip.pgd 0: %?l4.0
diff --git a/test/src/ptxed-insn-stat.ptt b/test/src/ptxed-insn-stat.ptt
new file mode 100644
index 000000000000..4b1cfe6be6bd
--- /dev/null
+++ b/test/src/ptxed-insn-stat.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptxed counts instructions correctly.
+;
+; opt:ptxed --insn-decoder --stat
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: nop
+l2: nop
+l3: nop
+
+; @pt p4:fup(3: %l4)
+; @pt p5:tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;%0l1
+;%0l2
+;%0l3
+;[disabled]
+;insn: 4.
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %0l0
+;%0p3 psbend
+;%0p4 fup 3: %0l4
+;%0p5 tip.pgd 0: %?l5.0
diff --git a/test/src/ptxed-stat_insn.ptt b/test/src/ptxed-stat_insn.ptt
new file mode 100644
index 000000000000..dc520b4ee14b
--- /dev/null
+++ b/test/src/ptxed-stat_insn.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that ptxed counts instructions correctly.
+;
+; opt:ptxed --stat --stat:insn
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+l1: nop
+l2: nop
+l3: nop
+
+; @pt p4:fup(3: %l4)
+; @pt p5:tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;%0l1
+;%0l2
+;%0l3
+;[disabled]
+;insn: 4.
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %0l0
+;%0p3 psbend
+;%0p4 fup 3: %0l4
+;%0p5 tip.pgd 0: %?l5.0
diff --git a/test/src/ptxed-tick.ptt b/test/src/ptxed-tick.ptt
new file mode 100644
index 000000000000..68a0572bdde5
--- /dev/null
+++ b/test/src/ptxed-tick.ptt
@@ -0,0 +1,98 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test tick events
+;
+; opt:ptdump --nom-freq 4 --time --time-delta
+; opt:ptxed --nom-freq 4 --event:tick --event:time
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: tsc(0xa000)
+; @pt p4: cbr(0x2)
+; @pt p5: psbend()
+
+l0: mov rax, 0x0
+l1: jmp l3
+l2: add rax, 0x1
+l3: cmp rax, 0x1
+; @pt p6: cyc(0x4)
+; @pt p7: tnt(t.n)
+l4: jle l2
+
+; @pt p8: cyc(0x12)
+; @pt p9: tip(1: %l8)
+; @pt p10: cyc(0x8)
+; @pt p11: tnt(t)
+l5: call rax
+l6: nop
+
+; @pt p12: cyc(0x6)
+; @pt p13: fup(1: %l7)
+; @pt p14: tip.pgd(0: %l7)
+l7: hlt
+
+l8: ret
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 tsc a000 tsc +a000
+;%0p4 cbr 2
+;%0p5 psbend
+;%0p6 cyc 4 tsc +8
+;%0p7 tnt.8 !.
+;%0p8 cyc 12 tsc +24
+;%0p9 tip 1: %?l8.2
+;%0p10 cyc 8 tsc +10
+;%0p11 tnt.8 !
+;%0p12 cyc 6 tsc +c
+;%0p13 fup 1: %?l7.2
+;%0p14 tip.pgd 0: %?l7.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # mov rax, 0x0
+;%0l1 # jmp l3
+;%0l3 # cmp rax, 0x1
+;%0l4 # jle l2
+;[000000000000a008 tick]
+;%0l2 # add rax, 0x1
+;%0l3 # cmp rax, 0x1
+;%0l4 # jle l2
+;%0l5 # call rax
+;[000000000000a02c tick]
+;%0l8 # ret
+;[000000000000a03c tick]
+;%0l6 # nop
+;[000000000000a048 disabled]
diff --git a/test/src/pwre-exstop_ip-pwrx.ptt b/test/src/pwre-exstop_ip-pwrx.ptt
new file mode 100644
index 000000000000..591149a8f56e
--- /dev/null
+++ b/test/src/pwre-exstop_ip-pwrx.ptt
@@ -0,0 +1,72 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a power event session.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: pwre(c1.0, hw)
+; @pt p5: exstop(ip)
+; @pt p6: fup(1: %l1)
+l1: nop
+
+; @pt p7: pwrx(hw: c1, c0)
+l2: nop
+l3: nop
+
+; @pt p8:fup(1: %l3)
+; @pt p9:tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 pwre c1.0, hw
+;%0p5 exstop ip
+;%0p6 fup 1: %?l1.2
+;%0p7 pwrx hw: c1, c0
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;[pwre c1.0 hw]
+;%0l0
+;[exstop]
+;[pwrx hw: c1 (c0)]
+;%0l1
+;%0l2
+;[disabled]
diff --git a/test/src/ret_near_far.ptt b/test/src/ret_near_far.ptt
new file mode 100644
index 000000000000..d9568866e94f
--- /dev/null
+++ b/test/src/ret_near_far.ptt
@@ -0,0 +1,361 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that far returns are not considered for ret compression
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: call l5
+l2: nop
+l3: nop
+l4: hlt
+
+l5: test eax, 0
+l6: jz l10
+l7: call l5
+l8: ret
+l9: hlt
+
+l10: call far [rax] ; l13
+l11: jmp l8
+l12: hlt
+
+l13: retf
+l14: hlt
+
+; Let's assume the call in l7 is executed 63 times. This doesn't make sense
+; from looking at the code above, but that's not the point here.
+;
+; All calls so far are direct, but we have a conditional jump in l6, which
+; is executed 64 times. On the 64th execution, it is taken and brings us to
+; the far call in l10.
+;
+; @pt p5: tnt64(nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn)
+; @pt p6: tnt64(nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnt)
+
+; Together with the call in l1 we now have a full return compression stack.
+;
+; @pt p7: tip(3: %l13)
+
+; The far return is not compressed.
+;
+; @pt p8: tip(3: %l11)
+
+; The following 64 returns are.
+;
+; @pt p9: tnt64(tttttttttttttttttttttttttttttttt)
+; @pt p10: tnt64(tttttttttttttttttttttttttttttttt)
+
+; Disable tracing to complete the test.
+;
+; @pt p11: fup(3: %l3)
+; @pt p12: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.64 ................................
+;%0p6 tnt.64 ...............................!
+;%0p7 tip 3: %0l13
+;%0p8 tip 3: %0l11
+;%0p9 tnt.64 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+;%0p10 tnt.64 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+;%0p11 fup 3: %0l3
+;%0p12 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l7 # call l5
+;%0l5 # test eax
+;%0l6 # jz l10
+;%0l10 # call far [rax] # l13
+;%0l13 # retf
+;%0l11 # jmp l8
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l8 # ret
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/skd007.ptt b/test/src/skd007.ptt
new file mode 100644
index 000000000000..56ba6ee526b0
--- /dev/null
+++ b/test/src/skd007.ptt
@@ -0,0 +1,81 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKD007: Intel(R) PT Buffer Overflow May Result in Incorrect Packets.
+;
+; Under complex micro-architectural conditions, an Intel PT (Processor
+; Trace) OVF (Overflow) packet may be issued after the first byte of a
+; multi-byte CYC (Cycle Count) packet, instead of any remaining bytes
+; of the CYC.
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+l0: nop
+
+; The first CYC has its 2nd byte overwritten by OVF, which appears as
+; another CYC packet. The two CYCs will have payloads of:
+;
+; 0x3* or 0x2* and
+; 0x1e
+;
+; @pt p3: cyc(0x3e)
+; @pt p4: cyc(0x1e)
+; @pt p5: pad()
+; @pt p6: fup(3: %l1)
+l1: nop
+
+; @pt p7: fup(1: %l2)
+; @pt p8: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 cyc 3e
+;%0p4 cyc 1e
+;%0p5 pad
+;%0p6 fup 3: %?l1
+;%0p7 fup 1: %?l2.2
+;%0p8 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;%0l1 # nop
+;[disabled]
diff --git a/test/src/skd010-mode_tsx-fup.ptt b/test/src/skd010-mode_tsx-fup.ptt
new file mode 100644
index 000000000000..9a30a0ae14be
--- /dev/null
+++ b/test/src/skd010-mode_tsx-fup.ptt
@@ -0,0 +1,76 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKD010: Intel(R) PT FUP May be Dropped After OVF.
+;
+; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+; Packet, Packet Generation Enable).
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+; Variant: Missing FUP, sync at MODE.TSX + FUP.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: ovf()
+; fup missing
+
+; @pt p4: mode.tsx(begin)
+; @pt p5: fup(3: %l0)
+l0: nop
+
+; @pt p6: fup(1: %l1)
+; @pt p7: tip.pgd(0: %l2)
+l1: nop
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 ovf
+;%0p4 mode.tsx intx
+;%0p5 fup 3: %?l0
+;%0p6 fup 1: %?l1.2
+;%0p7 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[begin transaction]
+;? %0l0
+;[disabled]
diff --git a/test/src/skd010-psb.ptt b/test/src/skd010-psb.ptt
new file mode 100644
index 000000000000..5e25ec9e9225
--- /dev/null
+++ b/test/src/skd010-psb.ptt
@@ -0,0 +1,79 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKD010: Intel(R) PT FUP May be Dropped After OVF.
+;
+; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+; Packet, Packet Generation Enable).
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+; Variant: Missing FUP, sync at PSB+.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: ovf()
+; fup missing
+
+; @pt p4: psb()
+; @pt p5: mode.exec(64bit)
+; @pt p6: fup(3: %l0)
+; @pt p7: psbend()
+l0: nop
+
+; @pt p8: fup(1: %l1)
+; @pt p9: tip.pgd(0: %l2)
+l1: nop
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 ovf
+;%0p4 psb
+;%0p5 mode.exec cs.l
+;%0p6 fup 3: %?l0
+;%0p7 psbend
+;%0p8 fup 1: %?l1.2
+;%0p9 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;%0l0 # nop
+;[disabled]
diff --git a/test/src/skd010-tip.ptt b/test/src/skd010-tip.ptt
new file mode 100644
index 000000000000..6cc44a709aaf
--- /dev/null
+++ b/test/src/skd010-tip.ptt
@@ -0,0 +1,73 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKD010: Intel(R) PT FUP May be Dropped After OVF.
+;
+; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+; Packet, Packet Generation Enable).
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+; Variant: Missing FUP, sync at TIP.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: ovf()
+; fup missing
+
+; @pt p4: tip(3: %l0)
+l0: nop
+
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(0: %l2)
+l1: nop
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 ovf
+;%0p4 tip 3: %?l0
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;%0l0 # nop
+;[disabled]
diff --git a/test/src/skd010-tip_pgd.ptt b/test/src/skd010-tip_pgd.ptt
new file mode 100644
index 000000000000..419eb00b32db
--- /dev/null
+++ b/test/src/skd010-tip_pgd.ptt
@@ -0,0 +1,84 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKD010: Intel(R) PT FUP May be Dropped After OVF.
+;
+; Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+; be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+; Packet, Packet Generation Enable).
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+; Variant: Missing FUP, sync at TIP.PGD.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: hlt
+
+; @pt p4: ovf()
+; fup missing
+
+; @pt p5: tip.pgd(0: %l1)
+l1: hlt
+
+; We need to re-enable tracing in order to get the overflow indication
+; at the enable instruction.
+;
+; @pt p6: tip.pge(3: %l2)
+l2: nop
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+l3: nop
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 ovf
+;%0p5 tip.pgd 0: %?l1.0
+;%0p6 tip.pge 3: %?l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;[overflow]
+;[enabled]
+;%0l2
+;[disabled]
diff --git a/test/src/skd022.ptt b/test/src/skd022.ptt
new file mode 100644
index 000000000000..b74aa52f4f7e
--- /dev/null
+++ b/test/src/skd022.ptt
@@ -0,0 +1,81 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKD022: VM Entry That Clears TraceEn May Generate a FUP.
+;
+; If VM entry clears Intel(R) PT (Intel Processor Trace)
+; IA32_RTIT_CTL.TraceEn (MSR 570H, bit 0) while PacketEn is 1 then a
+; FUP (Flow Update Packet) will precede the TIP.PGD (Target IP Packet,
+; Packet Generation Disable). VM entry can clear TraceEn if the
+; VM-entry MSR-load area includes an entry for the IA32_RTIT_CTL MSR.
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: vmlaunch
+
+; @pt p4: fup(1: %l0)
+; @pt p5: tip.pgd(0: %l1)
+l1: hlt
+
+; @pt p6: tip.pge(3: %l2)
+l2: nop
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+l3: vmresume
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l0.2
+;%0p5 tip.pgd 0: %?l1.0
+;%0p6 tip.pge 3: %?l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # vmlaunch
+;[disabled]
+;[enabled]
+;%0l2 # nop
+;%0l3 # vmresume
+;[disabled]
diff --git a/test/src/skl014-call.ptt b/test/src/skl014-call.ptt
new file mode 100644
index 000000000000..125d6b02e047
--- /dev/null
+++ b/test/src/skl014-call.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKL014: Intel(R) PT TIP.PGD May Not Have Target IP Payload.
+;
+; When Intel PT (Intel Processor Trace) is enabled and a direct
+; unconditional branch clears IA32_RTIT_STATUS.FilterEn (MSR 571H,
+; bit 0), due to this erratum, the resulting TIP.PGD (Target IP
+; Packet, Packet Generation Disable) may not have an IP payload
+; with the target IP.
+;
+; Variant: call
+;
+; opt:ptxed --filter:addr0_cfg 1 --filter:addr0_a 0x1000 --filter:addr0_b 0x10ff
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: call l2
+l1: hlt
+
+; @pt p4: tip.pgd(0: %l2)
+ALIGN 0x100
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # call l2
+;[disabled]
diff --git a/test/src/skl014-jmp-jmp.ptt b/test/src/skl014-jmp-jmp.ptt
new file mode 100644
index 000000000000..7bdf14bc604f
--- /dev/null
+++ b/test/src/skl014-jmp-jmp.ptt
@@ -0,0 +1,74 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKL014: Intel(R) PT TIP.PGD May Not Have Target IP Payload.
+;
+; When Intel PT (Intel Processor Trace) is enabled and a direct
+; unconditional branch clears IA32_RTIT_STATUS.FilterEn (MSR 571H,
+; bit 0), due to this erratum, the resulting TIP.PGD (Target IP
+; Packet, Packet Generation Disable) may not have an IP payload
+; with the target IP.
+;
+; Variant: TIP.PGD binds to second jmp
+;
+; opt:ptxed --filter:addr0_cfg 1 --filter:addr0_a 0x1000 --filter:addr0_b 0x10ff
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: jmp l2
+l1: hlt
+
+l2: jmp l4
+l3: hlt
+
+; @pt p4: tip.pgd(0: %l4)
+ALIGN 0x100
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # jmp l2
+;%0l2 # jmp l4
+;[disabled]
diff --git a/test/src/skl014-jmp.ptt b/test/src/skl014-jmp.ptt
new file mode 100644
index 000000000000..93507ea482d9
--- /dev/null
+++ b/test/src/skl014-jmp.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; SKL014: Intel(R) PT TIP.PGD May Not Have Target IP Payload.
+;
+; When Intel PT (Intel Processor Trace) is enabled and a direct
+; unconditional branch clears IA32_RTIT_STATUS.FilterEn (MSR 571H,
+; bit 0), due to this erratum, the resulting TIP.PGD (Target IP
+; Packet, Packet Generation Disable) may not have an IP payload
+; with the target IP.
+;
+; Variant: jmp
+;
+; opt:ptxed --filter:addr0_cfg 1 --filter:addr0_a 0x1000 --filter:addr0_b 0x10ff
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: jmp l2
+l1: hlt
+
+; @pt p4: tip.pgd(0: %l2)
+ALIGN 0x100
+l2: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tip.pgd 0: %?l2.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # jmp l2
+;[disabled]
diff --git a/test/src/skl014-no_filter.ptt b/test/src/skl014-no_filter.ptt
new file mode 100644
index 000000000000..ce55a7afa04c
--- /dev/null
+++ b/test/src/skl014-no_filter.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2017-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD with suppressed IP payload is not applied to the next direct
+; branch if we do not have a filter configuration.
+;
+; cpu 6/78
+; cpu 6/94
+; cpu 6/142
+; cpu 6/158
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+l0: jmp l2
+l1: hlt
+
+l2: jmp far [rax] ; l4
+l3: hlt
+
+; @pt p4: tip.pgd(0: %l4)
+l4: hlt
+
+; @pt .exp(ptxed)
+;%0l0 # jmp l2
+;%0l2 # jmp far [rax] ; l4
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %0l0
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 tip.pgd 0: %?l4.0
diff --git a/test/src/syscall-sysret-cpl_0.ptt b/test/src/syscall-sysret-cpl_0.ptt
new file mode 100644
index 000000000000..0d2b04adb340
--- /dev/null
+++ b/test/src/syscall-sysret-cpl_0.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that SYSCALL followed by SYSRET is decoded correctly.
+;
+; Variant: cpl 3 filtered out
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+; @pt p4: tip.pge(3: %l5)
+
+l1: syscall
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: sysret
+l7: hlt
+
+; @pt p5: tip.pgd(0: %l2)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 tip.pge 3: %0l5
+;%0p5 tip.pgd 0: %?l2.0
+
+; @pt .exp(ptxed)
+;[enabled]
+;%0l5 # nop
+;%0l6 # sysret
+;[disabled]
diff --git a/test/src/syscall-sysret-cpl_3.ptt b/test/src/syscall-sysret-cpl_3.ptt
new file mode 100644
index 000000000000..8685adaf2b52
--- /dev/null
+++ b/test/src/syscall-sysret-cpl_3.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that SYSCALL followed by SYSRET is decoded correctly.
+;
+; Variant: cpl 0 filtered out
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; @pt p5: tip.pgd(0: %l5)
+
+l1: syscall
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: sysret
+l7: hlt
+
+; @pt p6: tip.pge(3: %l2)
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l5.0
+;%0p6 tip.pge 3: %?l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l1 # syscall
+;[disabled]
+;[resumed]
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/syscall-sysret.ptt b/test/src/syscall-sysret.ptt
new file mode 100644
index 000000000000..26f67c671f6d
--- /dev/null
+++ b/test/src/syscall-sysret.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that SYSCALL followed by SYSRET is decoded correctly.
+;
+; Variant: no cpl filtering
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; @pt p5: tip(3: %l5)
+
+l1: syscall
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: sysret
+l7: hlt
+
+; @pt p6: tip(3: %l2)
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip 3: %0l5
+;%0p6 tip 3: %0l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l1 # syscall
+;%0l5 # nop
+;%0l6 # sysret
+;%0l2 # nop
+;[disabled]
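The ptxed expectations above assume the decoder resolves the SYSCALL and SYSRET targets from the TIP payloads p5 and p6. A minimal query-decoder sketch of that walk follows; it is not part of this change, the buffer and function name are illustrative, and it only handles the cases this trace exercises.

#include <intel-pt.h>
#include <stdint.h>
#include <string.h>

/* Illustrative sketch (not part of this change): follow a trace like the
 * one above with the query decoder, resolving the SYSCALL/SYSRET targets
 * from the TIP payloads p5 and p6. */
int follow_trace(uint8_t *buf, size_t len)
{
	struct pt_query_decoder *qry;
	struct pt_config config;
	struct pt_event ev;
	uint64_t ip;
	int status;

	memset(&config, 0, sizeof(config));
	config.size = sizeof(config);
	config.begin = buf;
	config.end = buf + len;

	qry = pt_qry_alloc_decoder(&config);
	if (!qry)
		return -pte_nomem;

	/* Synchronize onto the PSB+ formed by p1..p4 above. */
	status = pt_qry_sync_forward(qry, &ip);

	while (status >= 0) {
		/* Drain pending events first, e.g. the disable reported
		 * for the final tip.pgd. */
		if (status & pts_event_pending) {
			status = pt_qry_event(qry, &ev, sizeof(ev));
			continue;
		}

		/* Each SYSCALL/SYSRET consumes one TIP payload (p5, p6). */
		status = pt_qry_indirect_branch(qry, &ip);
	}

	pt_qry_free_decoder(qry);
	return (status == -pte_eos) ? 0 : status;
}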
diff --git a/test/src/sysenter-sysexit-cpl_0.ptt b/test/src/sysenter-sysexit-cpl_0.ptt
new file mode 100644
index 000000000000..a4d66371d29a
--- /dev/null
+++ b/test/src/sysenter-sysexit-cpl_0.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that SYSENTER followed by SYSEXIT are decoded correctly.
+;
+; Variant: cpl 3 filtered out
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+; @pt p4: tip.pge(3: %l5)
+
+l1: db 0x0f, 0x34 ; sysenter
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: db 0x0f, 0x35 ; sysexit
+l7: hlt
+
+; @pt p5: tip.pgd(0: %l2)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 tip.pge 3: %0l5
+;%0p5 tip.pgd 0: %?l2.0
+
+; @pt .exp(ptxed)
+;[enabled]
+;%0l5 # nop
+;%0l6 # sysexit
+;[disabled]
diff --git a/test/src/sysenter-sysexit-cpl_3.ptt b/test/src/sysenter-sysexit-cpl_3.ptt
new file mode 100644
index 000000000000..986811262816
--- /dev/null
+++ b/test/src/sysenter-sysexit-cpl_3.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that SYSENTER followed by SYSEXIT are decoded correctly.
+;
+; Variant: cpl 0 filtered out
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; @pt p5: tip.pgd(0: %l5)
+
+l1: db 0x0f, 0x34 ; sysenter
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: db 0x0f, 0x35 ; sysexit
+l7: hlt
+
+; @pt p6: tip.pge(3: %l2)
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l5.0
+;%0p6 tip.pge 3: %?l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l1 # sysenter
+;[disabled]
+;[resumed]
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/sysenter-sysexit.ptt b/test/src/sysenter-sysexit.ptt
new file mode 100644
index 000000000000..c1314954e1bb
--- /dev/null
+++ b/test/src/sysenter-sysexit.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that SYSENTER followed by SYSEXIT are decoded correctly.
+;
+; Variant: no cpl filtering
+;
+
+org 0x100000
+bits 64
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+; @pt p5: tip(3: %l5)
+
+l1: db 0x0f, 0x34 ; sysenter
+l2: nop
+l3: nop
+l4: hlt
+
+l5: nop
+l6: db 0x0f, 0x35 ; sysexit
+l7: hlt
+
+; @pt p6: tip(3: %l2)
+
+; @pt p7: fup(1: %l3)
+; @pt p8: tip.pgd(0: %l4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip 3: %0l5
+;%0p6 tip 3: %0l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l1 # sysenter
+;%0l5 # nop
+;%0l6 # sysexit
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/tip-eos.ptt b/test/src/tip-eos.ptt
new file mode 100644
index 000000000000..6bb72e2f1aee
--- /dev/null
+++ b/test/src/tip-eos.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we indicate the end of the trace without a TIP.PGD.
+;
+; Variant: the trace ends after an indirect branch
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: jmp rax
+l1: hlt
+
+; @pt p4:tip(3: %l2)
+l2: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[end of trace]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %0l0
+;%0p3 psbend
+;%0p4 tip 3: %0l2
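Because this trace ends without a TIP.PGD, a decoder client sees -pte_eos from its next query rather than a disable event. A small hedged helper, in the style of the follow_trace() sketch earlier, shows how that outcome is told apart from a real decode error:

#include <intel-pt.h>

/* Sketch only: distinguish the "[end of trace]" outcome this test expects
 * from a real decode error. */
static int classify_end(int status)
{
	if (status == -pte_eos)
		return 0;	/* trace ended without TIP.PGD: expected here */
	return status;		/* negative: decode error; >= 0: keep decoding */
}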
diff --git a/test/src/tip_pgd-direct_call.ptt b/test/src/tip_pgd-direct_call.ptt
new file mode 100644
index 000000000000..a6bf2ada2c70
--- /dev/null
+++ b/test/src/tip_pgd-direct_call.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD is applied to the next direct branch (call in this case)
+; whose target matches the TIP.PGD payload.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: call l3
+l2: hlt
+l3: call l5
+l4: hlt
+
+; @pt p5: tip.pgd(3: %l5)
+l5: nop
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l3
+;%0l3 # call l5
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 3: %0l5
diff --git a/test/src/tip_pgd-direct_jump.ptt b/test/src/tip_pgd-direct_jump.ptt
new file mode 100644
index 000000000000..14e61222361f
--- /dev/null
+++ b/test/src/tip_pgd-direct_jump.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD is applied to the next direct branch (jump in this case)
+; whose target matches the TIP.PGD payload.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jmp l3
+l2: hlt
+l3: jmp l5
+l4: hlt
+
+; @pt p5: tip.pgd(3: %l5)
+l5: nop
+
+
+; @pt .exp(ptxed)
+;%0l1 # jmp l3
+;%0l3 # jmp l5
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 3: %0l5
diff --git a/test/src/tip_pgd-exstop-tip_pge.ptt b/test/src/tip_pgd-exstop-tip_pge.ptt
new file mode 100644
index 000000000000..02406b15d63a
--- /dev/null
+++ b/test/src/tip_pgd-exstop-tip_pge.ptt
@@ -0,0 +1,70 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a power event session while tracing is disabled.
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: jle l1
+l1: hlt
+
+; @pt p4: tip.pgd(1: %l1)
+
+; @pt p5: exstop()
+
+; @pt p6: tip.pge(3: %l2)
+l2: nop
+l3: nop
+
+; @pt p7:fup(1: %l3)
+; @pt p8:tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 tip.pgd 1: %?l1.2
+;%0p5 exstop
+;%0p6 tip.pge 3: %?l2
+;%0p7 fup 1: %?l3.2
+;%0p8 tip.pgd 0: %?l4.0
+
+; @pt .exp(ptxed)
+;%0l0
+;[disabled]
+;[exstop]
+;[enabled]
+;%0l2
+;[disabled]
diff --git a/test/src/tip_pgd-indirect_call.ptt b/test/src/tip_pgd-indirect_call.ptt
new file mode 100644
index 000000000000..9c5635d0fd54
--- /dev/null
+++ b/test/src/tip_pgd-indirect_call.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD is applied to the next branch (call in this case) that
+; would normally generate a TIP packet.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: call l3
+l2: hlt
+l3: call rax
+l4: hlt
+
+; @pt p5: tip.pgd(3: %l5)
+l5: nop
+
+
+; @pt .exp(ptxed)
+;%0l1 # call l3
+;%0l3 # call rax
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 3: %0l5
diff --git a/test/src/tip_pgd-indirect_jump.ptt b/test/src/tip_pgd-indirect_jump.ptt
new file mode 100644
index 000000000000..e2e15732a872
--- /dev/null
+++ b/test/src/tip_pgd-indirect_jump.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD is applied to the next branch (jump in this case) that
+; would normally generate a TIP packet.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jmp l3
+l2: hlt
+l3: jmp rax
+l4: hlt
+
+; @pt p5: tip.pgd(3: %l5)
+l5: nop
+
+
+; @pt .exp(ptxed)
+;%0l1 # jmp l3
+;%0l3 # jmp rax
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 3: %0l5
diff --git a/test/src/tip_pgd-pip-tip_pge.ptt b/test/src/tip_pgd-pip-tip_pge.ptt
new file mode 100644
index 000000000000..0f3eaf294df6
--- /dev/null
+++ b/test/src/tip_pgd-pip-tip_pge.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a PIP is processed while tracing is disabled.
+;
+; Variant: disable during normal tracing.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: syscall
+
+; @pt p5: tip.pgd(0: %l2)
+l2: hlt
+
+; @pt p6: pip(0xa00)
+; @pt p7: tip.pge(3: %l3)
+l3: nop
+
+l4: nop
+l5: hlt
+; @pt p8: fup(1: %l4)
+; @pt p9: tip.pgd(0: %l5)
+
+
+; @pt .exp(ptxed)
+;%0l1 # syscall
+;[disabled]
+;[paging, cr3: 0000000000000a00]
+;[enabled]
+;%0l3 # nop
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %?l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l2.0
+;%0p6 pip a00 cr3 0000000000000a00
+;%0p7 tip.pge 3: %?l3
+;%0p8 fup 1: %?l4.2
+;%0p9 tip.pgd 0: %?l5.0
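The ptxed expectation above reports the PIP as a paging event even though tracing is disabled at that point. A hedged sketch of how a query-decoder client might print these events; it assumes the standalone PIP (p6) is reported as ptev_paging, which should be checked against intel-pt.h and pt_qry_event(3).

#include <intel-pt.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch only: print the events this test expects while tracing is
 * disabled.  The mapping of the standalone PIP to ptev_paging is an
 * assumption. */
static int print_event(struct pt_query_decoder *qry)
{
	struct pt_event ev;
	int status;

	status = pt_qry_event(qry, &ev, sizeof(ev));
	if (status < 0)
		return status;

	switch (ev.type) {
	case ptev_disabled:
	case ptev_async_disabled:
		printf("[disabled]\n");
		break;
	case ptev_paging:
		printf("[paging, cr3: %016" PRIx64 "]\n",
		       ev.variant.paging.cr3);
		break;
	case ptev_enabled:
		printf("[enabled]\n");
		break;
	default:
		break;
	}

	return status;
}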
diff --git a/test/src/tip_pgd-psb-stop.ptt b/test/src/tip_pgd-psb-stop.ptt
new file mode 100644
index 000000000000..885378718e03
--- /dev/null
+++ b/test/src/tip_pgd-psb-stop.ptt
@@ -0,0 +1,64 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD.
+;
+; Variant: encountered PSB+ between TIP.PGD and TraceStop
+;
+
+org 0x100000
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+
+
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+; @pt p7: psb()
+; @pt p8: psbend()
+; @pt p9: stop()
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[disabled]
+;[stopped]
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 0: %?l3.0
+;%0p7 psb
+;%0p8 psbend
+;%0p9 stop
diff --git a/test/src/tip_pgd-stop.ptt b/test/src/tip_pgd-stop.ptt
new file mode 100644
index 000000000000..b9d0d99d452c
--- /dev/null
+++ b/test/src/tip_pgd-stop.ptt
@@ -0,0 +1,59 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TraceStop is applied to the same instruction as a preceding TIP.PGD.
+;
+; Variant: encountered during normal tracing, sync disable.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l0)
+; @pt p4: psbend()
+l0: jmp far [rax]
+
+
+; @pt p5: tip.pgd(0: %l1)
+; @pt p6: stop()
+l1: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %?l0
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l1.0
+;%0p6 stop
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[disabled]
+;[stopped]
diff --git a/test/src/tip_pgd-tnt_not_taken.ptt b/test/src/tip_pgd-tnt_not_taken.ptt
new file mode 100644
index 000000000000..d960697110cd
--- /dev/null
+++ b/test/src/tip_pgd-tnt_not_taken.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD is applied to the next branch that would normally
+; generate a TNT packet.
+;
+; Variant: disable on not taken.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jle l3
+; @pt p5: tnt(t)
+l2: hlt
+l3: jle l5
+l4: nop
+l5: hlt
+; @pt p6: tip.pgd(3: %l4)
+
+
+; @pt .exp(ptxed)
+;%0l1 # jle l3
+;%0l3 # jle l5
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !
+;%0p6 tip.pgd 3: %0l4
diff --git a/test/src/tip_pgd-tnt_taken.ptt b/test/src/tip_pgd-tnt_taken.ptt
new file mode 100644
index 000000000000..da14d386552f
--- /dev/null
+++ b/test/src/tip_pgd-tnt_taken.ptt
@@ -0,0 +1,61 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD is applied to the next branch that would normally
+; generate a TNT packet.
+;
+; Variant: disable on taken.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jle l3
+; @pt p5: tnt(t)
+l2: hlt
+l3: jle l5
+l4: hlt
+l5: nop
+; @pt p6: tip.pgd(3: %l5)
+
+
+; @pt .exp(ptxed)
+;%0l1 # jle l3
+;%0l3 # jle l5
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !
+;%0p6 tip.pgd 3: %0l5
diff --git a/test/src/tip_pgd-tsx.ptt b/test/src/tip_pgd-tsx.ptt
new file mode 100644
index 000000000000..0dbbc8889b88
--- /dev/null
+++ b/test/src/tip_pgd-tsx.ptt
@@ -0,0 +1,78 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX state is applied correctly when branch tracing is disabled.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+
+; @pt p4: mode.tsx(begin)
+; @pt p5: tip.pge(3: %l1)
+l1: nop
+
+; @pt p6: fup(1: %l2)
+; @pt p7: tip.pgd(0: %l3)
+l2: nop
+l3: hlt
+
+; @pt p8: mode.tsx(abort)
+; @pt p9: tip.pge(3: %l4)
+l4: nop
+
+; @pt p10: fup(1: %l5)
+; @pt p11: tip.pgd(0: %l6)
+l5: nop
+l6: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 mode.tsx intx
+;%0p5 tip.pge 3: %?l1
+;%0p6 fup 1: %?l2.2
+;%0p7 tip.pgd 0: %?l3.0
+;%0p8 mode.tsx abrt
+;%0p9 tip.pge 3: %?l4
+;%0p10 fup 1: %?l5.2
+;%0p11 tip.pgd 0: %?l6.0
+
+
+; @pt .exp(ptxed)
+;[begin transaction]
+;[enabled]
+;? %0l1 # nop
+;[disabled]
+;[aborted]
+;[enabled]
+;%0l4 # nop
+;[disabled]
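The leading '?' on "%0l1" in the ptxed expectation marks an instruction executed speculatively, i.e. inside a transaction that later aborts. A hedged sketch of tracking that state from the mode.tsx events; the variant.tsx.speculative and .aborted bit names are assumptions and should be checked against intel-pt.h.

#include <intel-pt.h>

/* Sketch only: track the transaction state that the '?' marker above
 * reflects.  The variant.tsx field names are assumptions. */
static void track_tsx(const struct pt_event *ev, int *in_tx)
{
	if (ev->type != ptev_tsx)
		return;

	if (ev->variant.tsx.aborted)
		*in_tx = 0;				/* "[aborted]" */
	else
		*in_tx = ev->variant.tsx.speculative;	/* "[begin transaction]" */
}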
diff --git a/test/src/tip_pgd_noip-far_jump.ptt b/test/src/tip_pgd_noip-far_jump.ptt
new file mode 100644
index 000000000000..0f450a1b2ae2
--- /dev/null
+++ b/test/src/tip_pgd_noip-far_jump.ptt
@@ -0,0 +1,54 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD with suppressed IP payload is applied to the next far branch
+; (far jump in this case).
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jmp far [rax] ; l3
+l2: hlt
+; @pt p5: tip.pgd(0: %l3)
+
+l3: hlt
+
+; @pt .exp(ptxed)
+;%0l1 # jmp far [rax] ; l3
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l3.0
diff --git a/test/src/tip_pgd_noip-mov_cr3.ptt b/test/src/tip_pgd_noip-mov_cr3.ptt
new file mode 100644
index 000000000000..543053189ca5
--- /dev/null
+++ b/test/src/tip_pgd_noip-mov_cr3.ptt
@@ -0,0 +1,54 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TIP.PGD with suppressed IP payload is applied to the next
+; MOV CR3 instruction.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+
+l1: mov cr3, rax
+l2: hlt
+; @pt p5: tip.pgd(0: %l2)
+
+
+; @pt .exp(ptxed)
+;%0l1 # mov cr3, rax
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tip.pgd 0: %?l2.0
diff --git a/test/src/tip_pge-exstop.ptt b/test/src/tip_pge-exstop.ptt
new file mode 100644
index 000000000000..88db42461397
--- /dev/null
+++ b/test/src/tip_pge-exstop.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test an execution stop event.
+;
+; Variant: before the first instruction, tracing starts disabled
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: tip.pge(3: %l0)
+l0: nop
+
+; @pt p4: exstop(ip)
+; @pt p5: fup(1: %l0)
+
+; @pt p6:fup(1: %l1)
+; @pt p7:tip.pgd(0: %l1)
+l1: hlt
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 tip.pge 3: %?l0
+;%0p4 exstop ip
+;%0p5 fup 1: %?l0.2
+;%0p6 fup 1: %?l1.2
+;%0p7 tip.pgd 0: %?l1.0
+
+; @pt .exp(ptxed)
+;[enabled]
+;[exstop]
+;%0l0
+;[disabled]
\ No newline at end of file
diff --git a/test/src/tip_pge-fup-tip_pgd-tip_pge.ptt b/test/src/tip_pge-fup-tip_pgd-tip_pge.ptt
new file mode 100644
index 000000000000..b5329e76ce35
--- /dev/null
+++ b/test/src/tip_pge-fup-tip_pgd-tip_pge.ptt
@@ -0,0 +1,66 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a combination of enable and async disable on the same IP.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+; @pt p4: tip.pge(3: %l1)
+l1: nop
+; @pt p5: fup(1: %l1)
+; @pt p6: tip.pgd(0: %l1)
+; @pt p7: tip.pge(3: %l1)
+l2: nop
+l3: nop
+; @pt p8: fup(1: %l3)
+; @pt p9: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 tip.pge 3: %0l1
+;%0p5 fup 1: %?l1.2
+;%0p6 tip.pgd 0: %?l1.0
+;%0p7 tip.pge 3: %0l1
+;%0p8 fup 1: %?l3.2
+;%0p9 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;[enabled]
+;[disabled]
+;[resumed]
+;%0l1 # nop
+;%0l2 # nop
+;[disabled]
diff --git a/test/src/tip_pge-fup-tip_pgd.ptt b/test/src/tip_pge-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..f7592ed37ef6
--- /dev/null
+++ b/test/src/tip_pge-fup-tip_pgd.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test enable and async disable around a single instruction.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+; @pt p4: tip.pge(3: %l1)
+l1: nop
+l2: nop
+; @pt p5: fup(1: %l2)
+; @pt p6: tip.pgd(0: %l3)
+l3: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 tip.pge 3: %0l1
+;%0p5 fup 1: %?l2.2
+;%0p6 tip.pgd 0: %?l3.0
+
+
+; @pt .exp(ptxed)
+;[enabled]
+;%0l1 # nop
+;[disabled]
diff --git a/test/src/tip_pge-ptw-fup-tip_pgd.ptt b/test/src/tip_pge-ptw-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..86c70fa7a912
--- /dev/null
+++ b/test/src/tip_pge-ptw-fup-tip_pgd.ptt
@@ -0,0 +1,75 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test PTW
+;
+; Variant: tracing is enabled, FUP on PTW
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: ptw(0: 0xabcd, ip)
+; @pt p5: fup(1: %l1)
+l1: db 0xf3, 0x0f, 0xae, 0xe0 ; ptwrite eax
+
+; @pt p6: ptw(1: 0xef09, ip)
+; @pt p7: fup(1: %l2)
+l2: db 0xf3, 0x48, 0x0f, 0xae, 0xe0 ; ptwrite rax
+l3: nop
+
+; @pt p8: fup(1: %l4)
+; @pt p9: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 ptw 0: abcd, ip
+;%0p5 fup 1: %?l1.2
+;%0p6 ptw 1: ef09, ip
+;%0p7 fup 1: %?l2.2
+;%0p8 fup 1: %?l4.2
+;%0p9 tip.pgd 0: %?l4.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # nop
+;%0l1 # ptwrite eax
+;[ptwrite: abcd]
+;%0l2 # ptwrite rax
+;[ptwrite: ef09]
+;%0l3 # nop
+;[disabled]
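The two ptwrite lines in the expectation carry the PTW payloads 0xabcd and 0xef09. A hedged sketch of printing them from the corresponding event; the ev->variant.ptwrite.payload field name is an assumption and should be checked against intel-pt.h.

#include <intel-pt.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch only, not part of this change: print a ptwrite event the way the
 * "[ptwrite: abcd]" / "[ptwrite: ef09]" expectations above suggest. */
static void print_ptwrite(const struct pt_event *ev)
{
	if (ev->type != ptev_ptwrite)
		return;

	printf("[ptwrite: %" PRIx64 "]\n", ev->variant.ptwrite.payload);
}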
diff --git a/test/src/tip_pge-ptw-tip_pgd.ptt b/test/src/tip_pge-ptw-tip_pgd.ptt
new file mode 100644
index 000000000000..1e3728354590
--- /dev/null
+++ b/test/src/tip_pge-ptw-tip_pgd.ptt
@@ -0,0 +1,72 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test PTW
+;
+; Variant: tracing is enabled, no FUP on PTW
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: fup(3: %l0)
+; @pt p2: mode.exec(64bit)
+; @pt p3: psbend()
+l0: nop
+
+; @pt p4: ptw(0: 0xabcd)
+l1: db 0xf3, 0x0f, 0xae, 0xe0 ; ptwrite eax
+
+; @pt p5: ptw(1: 0xef09)
+l2: db 0xf3, 0x48, 0x0f, 0xae, 0xe0 ; ptwrite rax
+l3: nop
+
+; @pt p6: fup(1: %l4)
+; @pt p7: tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 fup 3: %?l0
+;%0p2 mode.exec cs.l
+;%0p3 psbend
+;%0p4 ptw 0: abcd
+;%0p5 ptw 1: ef09
+;%0p6 fup 1: %?l4.2
+;%0p7 tip.pgd 0: %?l5.0
+
+
+; @pt .exp(ptxed)
+;%0l0 # nop
+;%0l1 # ptwrite eax
+;[ptwrite: abcd]
+;%0l2 # ptwrite rax
+;[ptwrite: ef09]
+;%0l3 # nop
+;[disabled]
diff --git a/test/src/tip_pge-pwre-pwrx-tip_pgd.ptt b/test/src/tip_pge-pwre-pwrx-tip_pgd.ptt
new file mode 100644
index 000000000000..016b9f24a4b9
--- /dev/null
+++ b/test/src/tip_pge-pwre-pwrx-tip_pgd.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test a power event session.
+;
+; Variant: between enable and async-disable without progress
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: tip.pge(3: %l0)
+l0: hlt
+
+; @pt p4: pwre(c1.0, hw)
+; @pt p5: pwrx(hw: c1, c0)
+
+; @pt p6:fup(1: %l0)
+; @pt p7:tip.pgd(0: %l0)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 tip.pge 3: %?l0
+;%0p4 pwre c1.0, hw
+;%0p5 pwrx hw: c1, c0
+;%0p6 fup 1: %?l0.2
+;%0p7 tip.pgd 0: %?l0.0
+
+; @pt .exp(ptxed)
+;[enabled]
+;[pwre c1.0 hw]
+;[pwrx hw: c1 (c0)]
+;[disabled]
\ No newline at end of file
diff --git a/test/src/tip_pge-tsx_abort-tip-fup-tip_pgd.ptt b/test/src/tip_pge-tsx_abort-tip-fup-tip_pgd.ptt
new file mode 100644
index 000000000000..3c05bd77c833
--- /dev/null
+++ b/test/src/tip_pge-tsx_abort-tip-fup-tip_pgd.ptt
@@ -0,0 +1,73 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX aborts are shown correctly.
+;
+; Variant: the abort happens when executing the first instruction.
+; tracing is disabled after jumping to the abort handler.
+;
+; opt:ptxed --event:ip
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: mode.tsx(begin)
+; @pt p4: tip.pge(3: %l0)
+l0: hlt
+
+; @pt p5: mode.tsx(abort)
+; @pt p6: fup(1: %l0)
+; @pt p7: tip(1: %l1)
+l1: hlt
+
+; @pt p8: fup(1: %l1)
+; @pt p9: tip.pgd(0: %l1)
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 mode.tsx intx
+;%0p4 tip.pge 3: %?l0
+;%0p5 mode.tsx abrt
+;%0p6 fup 1: %?l0.2
+;%0p7 tip 1: %?l1.2
+;%0p8 fup 1: %?l1.2
+;%0p9 tip.pgd 0: %?l1.0
+
+
+; @pt .exp(ptxed)
+;[begin transaction]
+;[enabled, ip: %0l0]
+;[aborted, ip: %0l0]
+;[interrupt, from: %0l0, to: %0l1]
+;[disabled, at: %0l1]
diff --git a/test/src/tip_pge-tsx_abort-tip_pgd.ptt b/test/src/tip_pge-tsx_abort-tip_pgd.ptt
new file mode 100644
index 000000000000..e3f90b1db3e8
--- /dev/null
+++ b/test/src/tip_pge-tsx_abort-tip_pgd.ptt
@@ -0,0 +1,67 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX aborts are shown correctly.
+;
+; Variant: the abort happens when executing the first instruction.
+; Tracing is disabled when jumping to the abort handler.
+;
+; opt:ptxed --event:ip
+;
+
+org 0x100000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: psbend()
+
+; @pt p3: mode.tsx(begin)
+; @pt p4: tip.pge(3: %l0)
+l0: hlt
+
+; @pt p5: mode.tsx(abort)
+; @pt p6: fup(1: %l0)
+; @pt p7: tip.pgd(1: %l1)
+l1: hlt
+
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 psbend
+;%0p3 mode.tsx intx
+;%0p4 tip.pge 3: %?l0
+;%0p5 mode.tsx abrt
+;%0p6 fup 1: %?l0.2
+;%0p7 tip.pgd 1: %?l1.2
+
+
+; @pt .exp(ptxed)
+;[begin transaction]
+;[enabled, ip: %0l0]
+;[aborted, ip: %0l0]
+;[disabled, at: %0l0, ip: %0l1]
diff --git a/test/src/tnt-tip_pgd_noip-sysret.ptt b/test/src/tnt-tip_pgd_noip-sysret.ptt
new file mode 100644
index 000000000000..2cb52725f73f
--- /dev/null
+++ b/test/src/tnt-tip_pgd_noip-sysret.ptt
@@ -0,0 +1,64 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that a TIP.PGD with a suppressed IP payload is applied to the next far
+; branch (sysret in this case).
+;
+; Variant: consume a TNT packet first to test that the disable event is not
+; applied too early.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: jle l3
+; @pt p5: tnt(tt)
+l2: hlt
+l3: jle l5
+l4: hlt
+l5: sysret
+l6: hlt
+; @pt p6: tip.pgd(0: %l6)
+
+
+; @pt .exp(ptxed)
+;%0l1 # jle l3
+;%0l3 # jle l5
+;%0l5 # sysret
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 tnt.8 !!
+;%0p6 tip.pgd 0: %?l6.0
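
The expected ptxed output above exercises two decode rules: the TNT bits steer
the conditional branches (jle l3, jle l5), while the TIP.PGD, whose IP payload
is suppressed, is held back until the next far transfer (the sysret). A minimal
sketch of that binding rule, using a simplified decoder state that is purely
illustrative and not libipt's implementation:

    #include <stdbool.h>

    struct toy_decoder {
            unsigned tnt;        /* pending TNT bits, consumed MSB first */
            unsigned tnt_count;  /* number of valid bits in tnt          */
            bool pending_pgd;    /* suppressed-IP TIP.PGD not yet bound  */
    };

    /* Conditional branch: consume one TNT bit; a pending disable stays pending. */
    static bool toy_cond_branch(struct toy_decoder *d)
    {
            return (d->tnt >> --d->tnt_count) & 1u;
    }

    /* Far transfer (e.g. sysret): bind a pending suppressed-IP TIP.PGD here. */
    static bool toy_far_branch(struct toy_decoder *d)
    {
            if (!d->pending_pgd)
                    return false;

            d->pending_pgd = false;
            return true;         /* report the [disabled] event */
    }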
diff --git a/test/src/tnt_n-eos.ptt b/test/src/tnt_n-eos.ptt
new file mode 100644
index 000000000000..0fd6a67f7b9d
--- /dev/null
+++ b/test/src/tnt_n-eos.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we indicate the end of the trace without a TIP.PGD.
+;
+; Variant: the trace ends after a non-taken conditional branch
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: je l2
+l1: hlt
+
+; @pt p4: tnt(n)
+l2: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[end of trace]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %0l0
+;%0p3 psbend
+;%0p4 tnt.8 .
diff --git a/test/src/tnt_t-eos.ptt b/test/src/tnt_t-eos.ptt
new file mode 100644
index 000000000000..4b0c12cd1f13
--- /dev/null
+++ b/test/src/tnt_t-eos.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that we indicate the end of the trace without a TIP.PGD.
+;
+; Variant: the trace ends after a taken conditional branch
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: je l2
+l1: hlt
+
+; @pt p4: tnt(t)
+l2: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0
+;[end of trace]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %0l0
+;%0p3 psbend
+;%0p4 tnt.8 !
diff --git a/test/src/truncated.ptt b/test/src/truncated.ptt
new file mode 100644
index 000000000000..3651f3188bc4
--- /dev/null
+++ b/test/src/truncated.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test an instruction crossing section boundaries.
+;
+; opt:ptxed --raw truncated.bin:0x2:0x1002
+;
+
+org 0x1000
+bits 64
+
+; @pt p0: psb()
+; @pt p1: mode.exec(64bit)
+; @pt p2: fup(3: %l0)
+; @pt p3: psbend()
+l0: nop
+
+l1: jmp l3
+l2: hlt
+
+l3: nop
+
+; @pt p4: fup(1: %l4)
+; @pt p5: tip.pgd(0: %l4)
+l4: hlt
+
+
+; @pt .exp(ptxed)
+;%0l0 # nop
+;%0l1 # jmp l3
+;%0l3 # nop
+;[disabled]
+
+; @pt .exp(ptdump)
+;%0p0 psb
+;%0p1 mode.exec cs.l
+;%0p2 fup 3: %?l0
+;%0p3 psbend
+;%0p4 fup 1: %?l4.2
+;%0p5 tip.pgd 0: %?l4.0
diff --git a/test/src/tsc-cbr-cyc-tsc.ptt b/test/src/tsc-cbr-cyc-tsc.ptt
new file mode 100644
index 000000000000..90dcea6db2d4
--- /dev/null
+++ b/test/src/tsc-cbr-cyc-tsc.ptt
@@ -0,0 +1,57 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration, time correction on TSC
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: tsc(0xa0000)
+; @pt p3: cbr(0x2)
+; @pt p4: psbend()
+
+; @pt p5: cyc(0x3)
+; @pt p6: cyc(0x1)
+
+; @pt p7: tsc(0xa0007)
+; @pt p8: cyc(0x2)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 tsc a0000 tsc +a0000
+;%0p3 cbr 2
+;%0p4 psbend
+;%0p5 cyc 3 tsc +6
+;%0p6 cyc 1 tsc +2
+;%0p7 tsc a0007 tsc -1
+;%0p8 cyc 2 tsc +4
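
The deltas in the expected output above are consistent with scaling CYC cycles
by the ratio of nominal frequency to core:bus ratio: with --nom-freq 4 and
cbr 2, one cycle accounts for two TSC ticks, so cyc 3 shows +6 and cyc 1 shows
+2, and the subsequent tsc 0xa0007 pulls the estimate back by 1. A small sketch
of that scaling, under the stated assumption (not libipt code):

    #include <stdint.h>

    /* Assumed CBR-based calibration: TSC ticks per cycle = nom_freq / cbr. */
    static uint64_t toy_cyc_to_tsc(uint64_t cycles, uint64_t nom_freq, uint64_t cbr)
    {
            return (cycles * nom_freq) / cbr;
    }

    /* toy_cyc_to_tsc(3, 4, 2) == 6   ("cyc 3  tsc +6")
     * toy_cyc_to_tsc(1, 4, 2) == 2   ("cyc 1  tsc +2")
     */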
diff --git a/test/src/tsc-cyc_calibrate.ptt b/test/src/tsc-cyc_calibrate.ptt
new file mode 100644
index 000000000000..9e3c5e9324b4
--- /dev/null
+++ b/test/src/tsc-cyc_calibrate.ptt
@@ -0,0 +1,69 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test CYC-based TSC estimation.
+;
+; Variant: TSC-based calibration
+;
+; opt:ptdump --time --time-delta
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: tsc(0xa0000)
+; @pt p3: psbend()
+
+; @pt p4: cyc(0x100)
+; @pt p5: tsc(0xa0200)
+; @pt p6: cyc(0x100)
+
+; @pt p7: psb()
+; @pt p8: tsc(0xa0300)
+; @pt p9: psbend()
+
+; @pt p10: cyc(0x100)
+; @pt p11: tsc(0xa0600)
+; @pt p12: cyc(0x100)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 tsc a0000 tsc +a0000
+;%0p3 psbend
+;[%p4: calibration error: no timing information]
+;[%p4: error updating time: no calibration]
+;%0p4 cyc 100 tsc +0
+;%0p5 tsc a0200 tsc +200
+;[%p6: calibration error: no timing information]
+;[%p6: error updating time: no calibration]
+;%0p6 cyc 100 tsc +0
+;%0p7 psb
+;%0p8 tsc a0300 tsc +100
+;%0p9 psbend
+;%0p10 cyc 100 tsc +100
+;%0p11 tsc a0600 tsc +200
+;%0p12 cyc 100 tsc +100
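
A plausible reading of the calibration above: before two TSC values are
available there is no cycles-to-TSC factor, hence the calibration errors on the
first CYC packets; once tsc 0xa0200 and tsc 0xa0300 bracket a cyc 0x100 packet,
the 0x100 TSC ticks over 0x100 cycles yield a factor of one, which explains the
later "cyc 100  tsc +100" lines, and the tsc 0xa0600 corrects the accumulated
estimate by +200. A sketch of that derivation (an assumption, not libipt code):

    #include <stdint.h>

    /* Assumed TSC-based calibration: ticks per cycle from two TSC packets
     * bracketing a known CYC cycle count.
     */
    static double toy_calibrate(uint64_t tsc0, uint64_t tsc1, uint64_t cycles)
    {
            return (double)(tsc1 - tsc0) / (double)cycles;
    }

    /* toy_calibrate(0xa0200, 0xa0300, 0x100) == 1.0, hence "cyc 100  tsc +100". */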
diff --git a/test/src/tsc-mtc-tma-mtc.ptt b/test/src/tsc-mtc-tma-mtc.ptt
new file mode 100644
index 000000000000..fad39ab846e0
--- /dev/null
+++ b/test/src/tsc-mtc-tma-mtc.ptt
@@ -0,0 +1,52 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: MTC between TSC and TMA are ignored
+;
+; opt:ptdump --time --time-delta --no-tcal
+; opt:ptdump --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: mtc(0xc1)
+; @pt p5: tma(0xc2d2, 0xe)
+; @pt p6: mtc(0xc3)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 mtc c1 tsc +0
+;%0p5 tma c2d2, e tsc +0
+;%0p6 mtc c3 tsc +aa
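
The +aa on the final MTC matches the following reading (an assumption about how
the expected numbers arise, not a statement of libipt internals): CTC ticks
scale to TSC by CPUID 0x15 EBX/EAX (here 4/1), the MTC payload carries CTC bits
[mtc-freq+7:mtc-freq] (here [15:8]), and the TMA fast-counter value is
subtracted when re-synchronizing on the first MTC after the TMA. The mtc 0xc1
that arrives between TSC and TMA is skipped, which is what this variant checks.

    #include <stdint.h>

    /* Assumed MTC-to-TSC arithmetic for the numbers above (not libipt code). */
    static uint64_t toy_mtc_delta(uint32_t ctc_tma, uint8_t mtc, unsigned mtc_freq,
                                  uint32_t eax, uint32_t ebx, uint16_t fc)
    {
            uint32_t ctc_mtc = (uint32_t)mtc << mtc_freq;  /* 0xc3 << 8    = 0xc300 */
            uint32_t dctc    = ctc_mtc - ctc_tma;          /* - 0xc2d2     = 0x2e   */

            return ((uint64_t)dctc * ebx) / eax - fc;      /* 0x2e*4 - 0xe = 0xaa   */
    }

    /* toy_mtc_delta(0xc2d2, 0xc3, 8, 1, 4, 0xe) == 0xaa   ("mtc c3  tsc +aa") */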
diff --git a/test/src/tsc-tma-cbr-cyc-mtc.ptt b/test/src/tsc-tma-cbr-cyc-mtc.ptt
new file mode 100644
index 000000000000..116635e589b9
--- /dev/null
+++ b/test/src/tsc-tma-cbr-cyc-mtc.ptt
@@ -0,0 +1,57 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration,
+; CYC between TMA and MTC, time corrected on MTC
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 4 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x12, 0x4)
+; @pt p5: cbr(0x2)
+; @pt p6: cyc(0x3)
+; @pt p7: cyc(0x1)
+; @pt p8: mtc(0x2)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 12, 4 tsc +0
+;%0p5 cbr 2
+;%0p6 cyc 3 tsc +6
+;%0p7 cyc 1 tsc +2
+;%0p8 mtc 2 tsc -5
diff --git a/test/src/tsc-tma-cbr-cyc.ptt b/test/src/tsc-tma-cbr-cyc.ptt
new file mode 100644
index 000000000000..208dd4359954
--- /dev/null
+++ b/test/src/tsc-tma-cbr-cyc.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration,
+; CYC between TMA and MTC (not shown)
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x102, 0x8)
+; @pt p5: cbr(0x2)
+; @pt p6: cyc(0x3)
+; @pt p7: cyc(0x1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 102, 8 tsc +0
+;%0p5 cbr 2
+;%0p6 cyc 3 tsc +6
+;%0p7 cyc 1 tsc +2
diff --git a/test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt b/test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt
new file mode 100644
index 000000000000..864d2e909080
--- /dev/null
+++ b/test/src/tsc-tma-cbr-mtc-cyc-mtc.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration, time correction on MTC
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0xf01, 0x1)
+; @pt p5: cbr(0x2)
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x3)
+; @pt p8: cyc(0x1)
+; @pt p9: mtc(0x3)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma f01, 1 tsc +0
+;%0p5 cbr 2
+;%0p6 mtc 2 tsc +3
+;%0p7 cyc 3 tsc +6
+;%0p8 cyc 1 tsc +2
+;%0p9 mtc 3 tsc -4
diff --git a/test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt b/test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt
new file mode 100644
index 000000000000..531815b044ae
--- /dev/null
+++ b/test/src/tsc-tma-cbr-mtc-cyc-no_cyc.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: Ignore CYC packets.
+;
+; opt:ptdump --time --time-delta --no-cyc
+; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0xf01, 0x1)
+; @pt p5: cbr(0x2)
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x3)
+; @pt p8: cyc(0x1)
+; @pt p9: mtc(0x3)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma f01, 1 tsc +0
+;%0p5 cbr 2
+;%0p6 mtc 2 tsc +3
+;%0p9 mtc 3 tsc +4
diff --git a/test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt b/test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt
new file mode 100644
index 000000000000..1b1086d589da
--- /dev/null
+++ b/test/src/tsc-tma-cbr-mtc-cyc-tsc.ptt
@@ -0,0 +1,58 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration, time correction on TSC
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0xf01, 0x1)
+; @pt p5: cbr(0x2)
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x3)
+; @pt p8: cyc(0x1)
+; @pt p9: tsc(0xa0008)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma f01, 1 tsc +0
+;%0p5 cbr 2
+;%0p6 mtc 2 tsc +3
+;%0p7 cyc 3 tsc +6
+;%0p8 cyc 1 tsc +2
+;%0p9 tsc a0008 tsc -3
diff --git a/test/src/tsc-tma-cbr-mtc-cyc.ptt b/test/src/tsc-tma-cbr-mtc-cyc.ptt
new file mode 100644
index 000000000000..b66950528d4c
--- /dev/null
+++ b/test/src/tsc-tma-cbr-mtc-cyc.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x102, 0x8)
+; @pt p5: cbr(0x2)
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x3)
+; @pt p8: cyc(0x1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 102, 8 tsc +0
+;%0p5 cbr 2
+;%0p6 mtc 2 tsc +3f0
+;%0p7 cyc 3 tsc +6
+;%0p8 cyc 1 tsc +2
diff --git a/test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt b/test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt
new file mode 100644
index 000000000000..86e5ab3ace59
--- /dev/null
+++ b/test/src/tsc-tma-cbr-mtc-cyc_calibrate.ptt
@@ -0,0 +1,60 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration, corrected using MTC-based calibration
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x18, 0x8)
+; @pt p5: cbr(0x2)
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x100)
+; @pt p8: mtc(0x3)
+; @pt p9: cyc(0x100)
+; @pt p10: mtc(0x4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 18, 8 tsc +0
+;%0p5 cbr 2
+;%0p6 mtc 2 tsc +18
+;%0p7 cyc 100 tsc +200
+;%0p8 mtc 3 tsc -1c0
+;%0p9 cyc 100 tsc +40
+;%0p10 mtc 4 tsc +0
diff --git a/test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt b/test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt
new file mode 100644
index 000000000000..24251f1e3ea5
--- /dev/null
+++ b/test/src/tsc-tma-cbr-mtc-mtc-cyc.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration
+; High CYC threshold resulting in no CYC between MTCs
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --mtc-freq 4 --nom-freq 1 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: tsc(0xa0000)
+; @pt p3: tma(0x18, 0x8)
+; @pt p4: cbr(2)
+; @pt p5: psbend()
+
+; @pt p6: mtc(0x2)
+; @pt p7: cyc(0x80)
+; @pt p8: mtc(0x3)
+; @pt p9: mtc(0x4)
+; @pt p10: cyc(0xe0)
+; @pt p11: mtc(0x5)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 tsc a0000 tsc +a0000
+;%0p3 tma 18, 8 tsc +0
+;%0p4 cbr 2
+;%0p5 psbend
+;%0p6 mtc 2 tsc +18
+;%0p7 cyc 80 tsc +40
+;%0p8 mtc 3 tsc +0
+;%0p9 mtc 4 tsc +40
+;%0p10 cyc e0 tsc +30
+;%0p11 mtc 5 tsc +10
diff --git a/test/src/tsc-tma-cyc.ptt b/test/src/tsc-tma-cyc.ptt
new file mode 100644
index 000000000000..04ebbdce141d
--- /dev/null
+++ b/test/src/tsc-tma-cyc.ptt
@@ -0,0 +1,52 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: No calibration.
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x102, 0x8)
+; @pt p5: cyc(0x3)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 102, 8 tsc +0
+;[%p5: calibration error: no timing information]
+;[%p5: error updating time: no calibration]
+;%0p5 cyc 3 tsc +0
diff --git a/test/src/tsc-tma-mtc-cyc_calibrate.ptt b/test/src/tsc-tma-mtc-cyc_calibrate.ptt
new file mode 100644
index 000000000000..213d0fcc8e34
--- /dev/null
+++ b/test/src/tsc-tma-mtc-cyc_calibrate.ptt
@@ -0,0 +1,60 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: MTC-based calibration
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x18, 0x8)
+; @pt p5: mtc(0x2)
+; @pt p6: cyc(0x100)
+; @pt p7: mtc(0x3)
+; @pt p8: cyc(0x100)
+; @pt p9: mtc(0x4)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 18, 8 tsc +0
+;%0p5 mtc 2 tsc +18
+;[%p6: calibration error: no timing information]
+;[%p6: error updating time: no calibration]
+;%0p6 cyc 100 tsc +0
+;%0p7 mtc 3 tsc +40
+;%0p8 cyc 100 tsc +40
+;%0p9 mtc 4 tsc +0
diff --git a/test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt b/test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt
new file mode 100644
index 000000000000..f52a0bc7a12b
--- /dev/null
+++ b/test/src/tsc-tma-mtc-mtc-cyc_calibrate.ptt
@@ -0,0 +1,63 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: MTC-based calibration
+; no CYC between MTCs
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x18, 0x8)
+; @pt p5: mtc(0x2)
+; @pt p6: mtc(0x3)
+; @pt p7: cyc(0x100)
+; @pt p8: mtc(0x4)
+; @pt p9: cyc(0x80)
+; @pt p10: mtc(0x5)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 18, 8 tsc +0
+;%0p5 mtc 2 tsc +18
+;%0p6 mtc 3 tsc +40
+;[%p7: calibration error: no timing information]
+;[%p7: error updating time: no calibration]
+;%0p7 cyc 100 tsc +0
+;%0p8 mtc 4 tsc +40
+;%0p9 cyc 80 tsc +40
+;%0p10 mtc 5 tsc +0
diff --git a/test/src/tsc-tma-mtc-tsc.ptt b/test/src/tsc-tma-mtc-tsc.ptt
new file mode 100644
index 000000000000..84f4674ef678
--- /dev/null
+++ b/test/src/tsc-tma-mtc-tsc.ptt
@@ -0,0 +1,54 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: time correction on TSC
+;
+; opt:ptdump --time --time-delta --no-tcal
+; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0xf012, 0x6)
+; @pt p5: mtc(0x2)
+; @pt p6: mtc(0x3)
+; @pt p7: tsc(0xa0008)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma f012, 6 tsc +0
+;%0p5 mtc 2 tsc +1
+;%0p6 mtc 3 tsc +8
+;%0p7 tsc a0008 tsc -1
diff --git a/test/src/tsc-tma-mtc_absolute.ptt b/test/src/tsc-tma-mtc_absolute.ptt
new file mode 100644
index 000000000000..e7b683e20935
--- /dev/null
+++ b/test/src/tsc-tma-mtc_absolute.ptt
@@ -0,0 +1,52 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: time displayed as absolute number
+;
+; opt:ptdump --time --no-tcal
+; opt:ptdump --mtc-freq 0 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 1
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0xff08, 0x1)
+; @pt p5: mtc(0x9)
+; @pt p6: mtc(0xa)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc 00000000000a0000
+;%0p4 tma ff08, 1 tsc 00000000000a0000
+;%0p5 mtc 9 tsc 00000000000a0000
+;%0p6 mtc a tsc 00000000000a0001
diff --git a/test/src/tsc-tma-mtc_infreq.ptt b/test/src/tsc-tma-mtc_infreq.ptt
new file mode 100644
index 000000000000..4549150e8698
--- /dev/null
+++ b/test/src/tsc-tma-mtc_infreq.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: low MTC frequency
+;
+; The MTC frequency is too low for TMA to provide the full CTC;
+; estimate the missing bits using the next MTC.
+;
+; opt:ptdump --time --no-tcal
+; opt:ptdump --mtc-freq 12 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0018)
+; @pt p4: tma(0xe020, 0x8)
+; @pt p5: mtc(0xaf)
+; @pt p6: mtc(0xb0)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0018 tsc 00000000000a0018
+;%0p4 tma e020, 8 tsc 00000000000a0018
+;%0p5 mtc af tsc 00000000000a0800
+;%0p6 mtc b0 tsc 00000000000a1000
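
With --mtc-freq 12 the MTC payload carries CTC bits [19:12], so the 16-bit CTC
from the TMA does not reach the MTC range; per the comment above, the missing
upper bits are estimated from the next MTC. One worked reading that reproduces
the expected values, assuming the same EBX/EAX scaling and fast-counter
adjustment as in the earlier MTC tests:

    CTC at mtc 0xaf : 0xaf << 12                   = 0xaf000
    CTC at tma      : 0xe020, upper bits inferred  = 0xae020
    dCTC            : 0xaf000 - 0xae020            = 0xfe0
    dTSC            : 0xfe0 * EBX/EAX - FC         = 0xfe0 / 2 - 0x8 = 0x7e8
    TSC             : 0xa0018 + 0x7e8              = 0xa0800

The next MTC (0xb0) adds another 0x1000 CTC, i.e. 0x800 TSC ticks, giving
0xa1000.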
diff --git a/test/src/tsc-tma-mtc_infreq_wrap.ptt b/test/src/tsc-tma-mtc_infreq_wrap.ptt
new file mode 100644
index 000000000000..8f1b6a4ac298
--- /dev/null
+++ b/test/src/tsc-tma-mtc_infreq_wrap.ptt
@@ -0,0 +1,55 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: low MTC frequency, wrap CTC
+;
+; The MTC frequency is too low for TMA to provide the full CTC;
+; estimate the missing bits using the next MTC.
+;
+; opt:ptdump --time --no-tcal
+; opt:ptdump --mtc-freq 12 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0018)
+; @pt p4: tma(0xf020, 0x8)
+; @pt p5: mtc(0xa0)
+; @pt p6: mtc(0xa1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0018 tsc 00000000000a0018
+;%0p4 tma f020, 8 tsc 00000000000a0018
+;%0p5 mtc a0 tsc 00000000000a0800
+;%0p6 mtc a1 tsc 00000000000a1000
diff --git a/test/src/tsc-tma-mtc_relative.ptt b/test/src/tsc-tma-mtc_relative.ptt
new file mode 100644
index 000000000000..2a9678676e53
--- /dev/null
+++ b/test/src/tsc-tma-mtc_relative.ptt
@@ -0,0 +1,52 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: time displayed as delta
+;
+; opt:ptdump --time --time-delta --no-tcal
+; opt:ptdump --mtc-freq 4 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 1
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0xf012, 0x6)
+; @pt p5: mtc(0x2)
+; @pt p6: mtc(0x3)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma f012, 6 tsc +0
+;%0p5 mtc 2 tsc +1
+;%0p6 mtc 3 tsc +8
diff --git a/test/src/tsc-tma-mtc_wrap.ptt b/test/src/tsc-tma-mtc_wrap.ptt
new file mode 100644
index 000000000000..5268a1c2a974
--- /dev/null
+++ b/test/src/tsc-tma-mtc_wrap.ptt
@@ -0,0 +1,52 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: wrap the CTC counter in MTC
+;
+; opt:ptdump --time --time-delta --no-tcal
+; opt:ptdump --mtc-freq 0 --cpuid-0x15.eax 3 --cpuid-0x15.ebx 9
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x1fff, 0x1)
+; @pt p5: mtc(0x0)
+; @pt p6: mtc(0x1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 1fff, 1 tsc +0
+;%0p5 mtc 0 tsc +2
+;%0p6 mtc 1 tsc +3
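
Under the same assumed conversion, this exercises the CTC wrap: with --mtc-freq 0 the MTC carries the low 8 CTC bits, so the TMA CTC 0x1fff masks down to 0xff and mtc(0x0) sits one CTC tick later across the wrap. At ebx/eax = 9/3 = 3 TSC ticks per CTC tick that gives 0xa0000 - fc(1) + 3, displayed as +2; mtc(0x1) is one more CTC tick, displayed as +3.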
diff --git a/test/src/tsc-tma_zero_fc-cbr-cyc.ptt b/test/src/tsc-tma_zero_fc-cbr-cyc.ptt
new file mode 100644
index 000000000000..2e0c583da60f
--- /dev/null
+++ b/test/src/tsc-tma_zero_fc-cbr-cyc.ptt
@@ -0,0 +1,56 @@
+; Copyright (c) 2016-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC and CYC-based TSC estimation.
+;
+; Variant: CBR-based calibration,
+; CYC between TMA and MTC (not shown)
+; TMA provides an FC of zero (which triggers CYC adjustment)
+;
+; opt:ptdump --time --time-delta
+; opt:ptdump --nom-freq 4 --mtc-freq 8 --cpuid-0x15.eax 1 --cpuid-0x15.ebx 4
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x102, 0)
+; @pt p5: cbr(0x2)
+; @pt p6: cyc(0x3)
+; @pt p7: cyc(0x1)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 102, 0 tsc +0
+;%0p5 cbr 2
+;%0p6 cyc 3 tsc +6
+;%0p7 cyc 1 tsc +2
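
The CYC deltas can be sanity-checked under the assumption that, with the TMA carrying an FC of zero, time is continued from CBR-based calibration: with --nom-freq 4 and cbr(0x2) one CPU cycle corresponds to nom/cbr = 2 TSC ticks, so cyc(0x3) shows as +6 and cyc(0x1) as +2. This is only a plausibility check of the displayed values, not a description of libipt's calibration logic.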
diff --git a/test/src/tsc_tma_mtc_gap.ptt b/test/src/tsc_tma_mtc_gap.ptt
new file mode 100644
index 000000000000..8db1631defc6
--- /dev/null
+++ b/test/src/tsc_tma_mtc_gap.ptt
@@ -0,0 +1,52 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test MTC-based TSC estimation.
+;
+; Variant: omit some MTC
+;
+; opt:ptdump --time --time-delta --no-tcal
+; opt:ptdump --mtc-freq 0 --cpuid-0x15.eax 2 --cpuid-0x15.ebx 8
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: psbend()
+
+; @pt p3: tsc(0xa0000)
+; @pt p4: tma(0x1, 0x4)
+; @pt p5: mtc(0x4)
+; @pt p6: mtc(0xa)
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 psbend
+;%0p3 tsc a0000 tsc +a0000
+;%0p4 tma 1, 4 tsc +0
+;%0p5 mtc 4 tsc +8
+;%0p6 mtc a tsc +18
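
Assuming the same simple conversion once more: ebx/eax = 8/2 = 4 TSC ticks per CTC tick and a TMA CTC of 1, so mtc(0x4) lies 3 CTC ticks later, 12 TSC ticks, minus fc = 4, displayed as +8; mtc(0xa) skips the intermediate MTC values and lands 6 CTC ticks after that, i.e. +0x18.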
diff --git a/test/src/tsx-abort.ptt b/test/src/tsx-abort.ptt
new file mode 100644
index 000000000000..bbaea1f32161
--- /dev/null
+++ b/test/src/tsx-abort.ptt
@@ -0,0 +1,76 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX aborts are shown correctly.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+
+; @pt p5: mode.tsx(begin)
+; @pt p6: fup(1: %l2)
+l2: nop
+
+; @pt p7: mode.tsx(abort)
+; @pt p8: fup(1: %l3)
+; @pt p9: tip(1: %l5)
+l3: nop
+l4: hlt
+
+l5: nop
+; @pt p10: fup(1: %l6)
+; @pt p11: tip.pgd(0: %l7)
+l6: nop
+l7: hlt
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[begin transaction]
+;? %0l2 # nop
+;[aborted]
+;[interrupt]
+;%0l5 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 mode.tsx intx
+;%0p6 fup 1: %?l2.2
+;%0p7 mode.tsx abrt
+;%0p8 fup 1: %?l3.2
+;%0p9 tip 1: %?l5.2
+;%0p10 fup 1: %?l6.2
+;%0p11 tip.pgd 0: %?l7.0
diff --git a/test/src/tsx-commit.ptt b/test/src/tsx-commit.ptt
new file mode 100644
index 000000000000..0ac411f316b8
--- /dev/null
+++ b/test/src/tsx-commit.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2013-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX commits are shown correctly.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: psbend()
+l1: nop
+
+; @pt p5: mode.tsx(begin)
+; @pt p6: fup(1: %l2)
+l2: nop
+
+; @pt p7: mode.tsx(commit)
+; @pt p8: fup(1: %l3)
+l3: nop
+
+; @pt p9: fup(1: %l4)
+; @pt p10: tip.pgd(0: %l5)
+l4: nop
+l5: hlt
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;[begin transaction]
+;? %0l2 # nop
+;[committed]
+;%0l3 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 psbend
+;%0p5 mode.tsx intx
+;%0p6 fup 1: %?l2.2
+;%0p7 mode.tsx
+;%0p8 fup 1: %?l3.2
+;%0p9 fup 1: %?l4.2
+;%0p10 tip.pgd 0: %?l5.0
diff --git a/test/src/tsx-no_spurious_commit.ptt b/test/src/tsx-no_spurious_commit.ptt
new file mode 100644
index 000000000000..afc54168819f
--- /dev/null
+++ b/test/src/tsx-no_spurious_commit.ptt
@@ -0,0 +1,71 @@
+; Copyright (c) 2014-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that TSX status updates in adjacent PSB+ do not cause spurious
+; commit indications.
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: fup(3: %l1)
+; @pt p3: mode.exec(64bit)
+; @pt p4: mode.tsx(commit)
+; @pt p5: psbend()
+l1: nop
+
+; @pt p6: psb()
+; @pt p7: fup(3: %l2)
+; @pt p8: mode.exec(64bit)
+; @pt p9: mode.tsx(commit)
+; @pt p10: psbend()
+l2: nop
+
+; @pt p11: fup(1: %l3)
+; @pt p12: tip.pgd(0: %l4)
+l3: nop
+l4: hlt
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;%0l2 # nop
+;[disabled]
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 fup 3: %0l1
+;%0p3 mode.exec cs.l
+;%0p4 mode.tsx
+;%0p5 psbend
+;%0p6 psb
+;%0p7 fup 3: %0l2
+;%0p8 mode.exec cs.l
+;%0p9 mode.tsx
+;%0p10 psbend
+;%0p11 fup 1: %?l3.2
+;%0p12 tip.pgd 0: %?l4.0
diff --git a/test/src/vmcs-far_call.ptt b/test/src/vmcs-far_call.ptt
new file mode 100644
index 000000000000..6798c45e1460
--- /dev/null
+++ b/test/src/vmcs-far_call.ptt
@@ -0,0 +1,68 @@
+; Copyright (c) 2015-2018, Intel Corporation
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; * Redistributions of source code must retain the above copyright notice,
+; this list of conditions and the following disclaimer.
+; * Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+; * Neither the name of Intel Corporation nor the names of its contributors
+; may be used to endorse or promote products derived from this software
+; without specific prior written permission.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+; AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+; ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+; LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+; CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+; SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+; INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+; CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+; ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+; POSSIBILITY OF SUCH DAMAGE.
+
+; Test that VMCS binds to a far branch
+;
+
+org 0x100000
+bits 64
+
+; @pt p1: psb()
+; @pt p2: mode.exec(64bit)
+; @pt p3: fup(3: %l1)
+; @pt p4: psbend()
+l1: nop
+
+; @pt p5: vmcs(0xcdcdc000)
+; @pt p6: tip(3: %l4)
+l2: call far [rax] ; l4
+l3: hlt
+
+l4: nop
+
+; @pt p7: fup(1: %l5)
+; @pt p8: tip.pgd(0: %l6)
+l5: nop
+l6: hlt
+
+
+; @pt .exp(ptdump)
+;%0p1 psb
+;%0p2 mode.exec cs.l
+;%0p3 fup 3: %?l1
+;%0p4 psbend
+;%0p5 vmcs cdcdc000 vmcs 00000000cdcdc000
+;%0p6 tip 3: %?l4
+;%0p7 fup 1: %?l5.2
+;%0p8 tip.pgd 0: %?l6.0
+
+
+; @pt .exp(ptxed)
+;%0l1 # nop
+;%0l2 # call far [rax] # l4
+;[vmcs, base: 00000000cdcdc000]
+;%0l4 # nop
+;[disabled]