author     Ruslan Bukin <br@FreeBSD.org>  2018-03-19 18:59:15 +0000
committer  Ruslan Bukin <br@FreeBSD.org>  2018-03-19 18:59:15 +0000
commit     74fe6c29fb7eef3418d7919dcd41dc1a04a982a1 (patch)
tree       02a99c9f9a877a2ceeceb474fea6346f017fd8cc  /contrib/processor-trace
parent     7af5f2acfb6b465bfe8d32d7e54f603f9a59e3fd (diff)
parent     766f5c51c3151507d3be26d606710d708302d8b2 (diff)
Import Intel Processor Trace decoder library from
vendor/processor-trace/24982c1a6fce48f1e416461d42899805f74fbb26

Sponsored by: DARPA, AFRL

Notes:
svn path=/head/; revision=331220
Diffstat (limited to 'contrib/processor-trace')
-rw-r--r--  contrib/processor-trace/include/posix/threads.h  259
-rw-r--r--  contrib/processor-trace/include/pt_compiler.h  47
-rw-r--r--  contrib/processor-trace/include/windows/inttypes.h  65
-rw-r--r--  contrib/processor-trace/include/windows/threads.h  239
-rw-r--r--  contrib/processor-trace/libipt/CMakeLists.txt  172
-rwxr-xr-x  contrib/processor-trace/libipt/include/intel-pt.h  2463
-rwxr-xr-x  contrib/processor-trace/libipt/include/intel-pt.h.in  2463
-rw-r--r--  contrib/processor-trace/libipt/internal/include/posix/pt_section_posix.h  100
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_asid.h  74
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_block_cache.h  225
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_block_decoder.h  143
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_config.h  82
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_cpu.h  54
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_cpuid.h  40
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_decoder_function.h  129
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_encoder.h  125
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_event_queue.h  143
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_ild.h  128
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_image.h  140
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_image_section_cache.h  206
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_insn.h  212
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_insn_decoder.h  139
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_last_ip.h  79
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_mapped_section.h  199
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_msec_cache.h  95
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_opcodes.h  397
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_packet.h  111
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_packet_decoder.h  92
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_query_decoder.h  134
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_retstack.h  87
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_section.h  392
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_section_file.h  106
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_sync.h  71
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_time.h  232
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pt_tnt_cache.h  88
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pti-disp-defs.h  39
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pti-disp.h  544
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pti-imm-defs.h  46
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pti-imm.h  544
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pti-modrm-defs.h  38
-rw-r--r--  contrib/processor-trace/libipt/internal/include/pti-modrm.h  544
-rw-r--r--  contrib/processor-trace/libipt/internal/include/windows/pt_section_windows.h  111
-rw-r--r--  contrib/processor-trace/libipt/src/posix/init.c  36
-rw-r--r--  contrib/processor-trace/libipt/src/posix/pt_cpuid.c  37
-rw-r--r--  contrib/processor-trace/libipt/src/posix/pt_section_posix.c  326
-rw-r--r--  contrib/processor-trace/libipt/src/pt_asid.c  106
-rw-r--r--  contrib/processor-trace/libipt/src/pt_block_cache.c  96
-rw-r--r--  contrib/processor-trace/libipt/src/pt_block_decoder.c  3469
-rw-r--r--  contrib/processor-trace/libipt/src/pt_config.c  251
-rw-r--r--  contrib/processor-trace/libipt/src/pt_cpu.c  164
-rw-r--r--  contrib/processor-trace/libipt/src/pt_decoder_function.c  379
-rw-r--r--  contrib/processor-trace/libipt/src/pt_encoder.c  917
-rw-r--r--  contrib/processor-trace/libipt/src/pt_error.c  122
-rw-r--r--  contrib/processor-trace/libipt/src/pt_event_queue.c  203
-rw-r--r--  contrib/processor-trace/libipt/src/pt_ild.c  1223
-rw-r--r--  contrib/processor-trace/libipt/src/pt_image.c  718
-rw-r--r--  contrib/processor-trace/libipt/src/pt_image_section_cache.c  1091
-rw-r--r--  contrib/processor-trace/libipt/src/pt_insn.c  372
-rw-r--r--  contrib/processor-trace/libipt/src/pt_insn_decoder.c  1765
-rw-r--r--  contrib/processor-trace/libipt/src/pt_last_ip.c  127
-rw-r--r--  contrib/processor-trace/libipt/src/pt_msec_cache.c  136
-rw-r--r--  contrib/processor-trace/libipt/src/pt_packet.c  573
-rw-r--r--  contrib/processor-trace/libipt/src/pt_packet_decoder.c  723
-rw-r--r--  contrib/processor-trace/libipt/src/pt_query_decoder.c  3630
-rw-r--r--  contrib/processor-trace/libipt/src/pt_retstack.c  94
-rw-r--r--  contrib/processor-trace/libipt/src/pt_section.c  643
-rw-r--r--  contrib/processor-trace/libipt/src/pt_section_file.c  255
-rw-r--r--  contrib/processor-trace/libipt/src/pt_sync.c  241
-rw-r--r--  contrib/processor-trace/libipt/src/pt_time.c  674
-rw-r--r--  contrib/processor-trace/libipt/src/pt_tnt_cache.c  89
-rw-r--r--  contrib/processor-trace/libipt/src/pt_version.c  43
-rw-r--r--  contrib/processor-trace/libipt/src/windows/init.c  51
-rw-r--r--  contrib/processor-trace/libipt/src/windows/pt_cpuid.c  43
-rw-r--r--  contrib/processor-trace/libipt/src/windows/pt_section_windows.c  397
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-asid.c  425
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-block_cache.c  370
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-config.c  496
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-cpp.cpp  78
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-cpu.c  173
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-event_queue.c  470
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-fetch.c  693
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-ild.c  759
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-image.c  2286
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-image_section_cache.c  2027
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-last_ip.c  374
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-mapped_section.c  198
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-msec_cache.c  419
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-packet.c  859
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-query.c  2873
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-retstack.c  232
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-section-file.c  192
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-section.c  1396
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-sync.c  306
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-time.c  368
-rw-r--r--  contrib/processor-trace/libipt/test/src/ptunit-tnt_cache.c  246
95 files changed, 45831 insertions, 0 deletions
diff --git a/contrib/processor-trace/include/posix/threads.h b/contrib/processor-trace/include/posix/threads.h
new file mode 100644
index 0000000000000..a9dcf05f757a2
--- /dev/null
+++ b/contrib/processor-trace/include/posix/threads.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * It looks like there is still no support for C11's threads.h.
+ *
+ * We implement the few features we actually need, hoping that this file
+ * will soon go away.
+ */
+
+#ifndef THREADS_H
+#define THREADS_H
+
+#include <pthread.h>
+
+#ifndef PTHREAD_MUTEX_NORMAL
+# define PTHREAD_MUTEX_NORMAL PTHREAD_MUTEX_TIMED_NP
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+
+enum {
+ thrd_success = 1,
+ thrd_error
+};
+
+struct pt_thread {
+ pthread_t thread;
+};
+typedef struct pt_thread thrd_t;
+
+typedef int (*thrd_start_t)(void *);
+
+
+struct thrd_args {
+ thrd_start_t fun;
+ void *arg;
+};
+
+static void *thrd_routine(void *arg)
+{
+ struct thrd_args *args;
+ int result;
+
+ args = arg;
+ if (!args)
+ return (void *) (intptr_t) -1;
+
+ result = -1;
+ if (args->fun)
+ result = args->fun(args->arg);
+
+ free(args);
+
+ return (void *) (intptr_t) result;
+}
+
+static inline int thrd_create(thrd_t *thrd, thrd_start_t fun, void *arg)
+{
+ struct thrd_args *args;
+ int errcode;
+
+ if (!thrd || !fun)
+ return thrd_error;
+
+ args = malloc(sizeof(*args));
+ if (!args)
+ return thrd_error;
+
+ args->fun = fun;
+ args->arg = arg;
+
+ errcode = pthread_create(&thrd->thread, NULL, thrd_routine, args);
+ if (errcode) {
+ free(args);
+ return thrd_error;
+ }
+
+ return thrd_success;
+}
+
+static inline int thrd_join(thrd_t *thrd, int *res)
+{
+ void *result;
+ int errcode;
+
+ if (!thrd)
+ return thrd_error;
+
+ errcode = pthread_join(thrd->thread, &result);
+ if (errcode)
+ return thrd_error;
+
+ if (res)
+ *res = (int) (intptr_t) result;
+
+ return thrd_success;
+}
+
+
+struct pt_mutex {
+ pthread_mutex_t mutex;
+};
+typedef struct pt_mutex mtx_t;
+
+enum {
+ mtx_plain = PTHREAD_MUTEX_NORMAL
+};
+
+static inline int mtx_init(mtx_t *mtx, int type)
+{
+ int errcode;
+
+ if (!mtx || type != mtx_plain)
+ return thrd_error;
+
+ errcode = pthread_mutex_init(&mtx->mutex, NULL);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline void mtx_destroy(mtx_t *mtx)
+{
+ if (mtx)
+ (void) pthread_mutex_destroy(&mtx->mutex);
+}
+
+static inline int mtx_lock(mtx_t *mtx)
+{
+ int errcode;
+
+ if (!mtx)
+ return thrd_error;
+
+ errcode = pthread_mutex_lock(&mtx->mutex);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int mtx_unlock(mtx_t *mtx)
+{
+ int errcode;
+
+ if (!mtx)
+ return thrd_error;
+
+ errcode = pthread_mutex_unlock(&mtx->mutex);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+
+struct pt_cond {
+ pthread_cond_t cond;
+};
+typedef struct pt_cond cnd_t;
+
+static inline int cnd_init(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_init(&cnd->cond, NULL);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_destroy(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_destroy(&cnd->cond);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_signal(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_signal(&cnd->cond);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_broadcast(cnd_t *cnd)
+{
+ int errcode;
+
+ if (!cnd)
+ return thrd_error;
+
+ errcode = pthread_cond_broadcast(&cnd->cond);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+static inline int cnd_wait(cnd_t *cnd, mtx_t *mtx)
+{
+ int errcode;
+
+ if (!cnd || !mtx)
+ return thrd_error;
+
+ errcode = pthread_cond_wait(&cnd->cond, &mtx->mutex);
+ if (errcode)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+#endif /* THREADS_H */
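The shim above covers only thread creation and join, plain mutexes, and condition variables. A minimal usage sketch (an illustration, not part of the import), relying solely on the functions defined in this header:

static int worker(void *arg)
{
	int *counter = (int *) arg;

	*counter += 1;
	return 42;
}

static int run_worker(void)
{
	thrd_t thrd;
	int counter = 0, res = 0;

	if (thrd_create(&thrd, worker, &counter) != thrd_success)
		return -1;

	if (thrd_join(&thrd, &res) != thrd_success)
		return -1;

	/* counter == 1 and res == 42 at this point. */
	return 0;
}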
diff --git a/contrib/processor-trace/include/pt_compiler.h b/contrib/processor-trace/include/pt_compiler.h
new file mode 100644
index 0000000000000..611703f259287
--- /dev/null
+++ b/contrib/processor-trace/include/pt_compiler.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_COMPILER_H
+#define PT_COMPILER_H
+
+
+/* Both the relevant Clang and GCC versions support this. */
+#if !defined(__has_attribute)
+# define __has_attribute(attr) 0
+#endif
+
+#if !defined(fallthrough)
+# if (__has_attribute(fallthrough))
+# define fallthrough __attribute__((fallthrough))
+# else
+# define fallthrough /* Fall through. */
+# endif
+#endif /* !defined(fallthrough) */
+
+
+#endif /* PT_COMPILER_H */
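For context, a sketch of how the fallthrough macro is meant to be used (an illustration, not part of the import). Where the attribute is unavailable, the macro expands to a comment, leaving a harmless empty statement:

static int weight(int c)
{
	int w = 0;

	switch (c) {
	case 2:
		w += 1;
		fallthrough;

	case 1:
		w += 1;
		break;

	default:
		break;
	}

	/* weight(2) == 2, weight(1) == 1, anything else == 0. */
	return w;
}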
diff --git a/contrib/processor-trace/include/windows/inttypes.h b/contrib/processor-trace/include/windows/inttypes.h
new file mode 100644
index 0000000000000..3659f361f1b49
--- /dev/null
+++ b/contrib/processor-trace/include/windows/inttypes.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifndef PRId64
+# define PRId64 "lld"
+#endif
+#ifndef PRIu64
+# define PRIu64 "llu"
+#endif
+#ifndef PRIx64
+# define PRIx64 "llx"
+#endif
+
+#ifndef PRId32
+# define PRId32 "d"
+#endif
+#ifndef PRIu32
+# define PRIu32 "u"
+#endif
+#ifndef PRIx32
+# define PRIx32 "x"
+#endif
+
+#ifndef PRIu16
+# define PRIu16 "u"
+#endif
+
+#ifndef PRIu8
+# define PRIu8 "u"
+#endif
+#ifndef PRIx8
+# define PRIx8 "x"
+#endif
+
+#ifndef SCNx64
+# define SCNx64 "llx"
+#endif
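This header backfills the C99 format macros on Windows toolchains that lack <inttypes.h>. A usage sketch (an illustration, not part of the import):

#include <stdio.h>

static void print_offset(uint64_t offset)
{
	/* PRIx64 expands to "llx" above, printing a 64-bit hex value. */
	printf("offset: 0x%" PRIx64 "\n", offset);
}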
diff --git a/contrib/processor-trace/include/windows/threads.h b/contrib/processor-trace/include/windows/threads.h
new file mode 100644
index 0000000000000..9e57b81d4ca71
--- /dev/null
+++ b/contrib/processor-trace/include/windows/threads.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * It looks like there is still no support for C11's threads.h.
+ *
+ * We implement the few features we actually need, hoping that this file
+ * will soon go away.
+ */
+
+#ifndef THREADS_H
+#define THREADS_H
+
+#include "windows.h"
+
+
+enum {
+ thrd_success = 1,
+ thrd_error
+};
+
+
+struct pt_thread {
+ HANDLE handle;
+};
+typedef struct pt_thread thrd_t;
+
+typedef int (*thrd_start_t)(void *);
+
+
+struct thrd_args {
+ thrd_start_t fun;
+ void *arg;
+};
+
+static DWORD WINAPI thrd_routine(void *arg)
+{
+ struct thrd_args *args;
+ int result;
+
+ args = (struct thrd_args *) arg;
+ if (!args)
+ return (DWORD) -1;
+
+ result = -1;
+ if (args->fun)
+ result = args->fun(args->arg);
+
+ free(args);
+
+ return (DWORD) result;
+}
+
+static inline int thrd_create(thrd_t *thrd, thrd_start_t fun, void *arg)
+{
+ struct thrd_args *args;
+ HANDLE handle;
+
+ if (!thrd || !fun)
+ return thrd_error;
+
+ args = malloc(sizeof(*args));
+ if (!args)
+ return thrd_error;
+
+ args->fun = fun;
+ args->arg = arg;
+
+ handle = CreateThread(NULL, 0, thrd_routine, args, 0, NULL);
+ if (!handle) {
+ free(args);
+ return thrd_error;
+ }
+
+ thrd->handle = handle;
+ return thrd_success;
+}
+
+static inline int thrd_join(thrd_t *thrd, int *res)
+{
+ DWORD status;
+ BOOL success;
+
+ if (!thrd)
+ return thrd_error;
+
+ status = WaitForSingleObject(thrd->handle, INFINITE);
+ if (status)
+ return thrd_error;
+
+ if (res) {
+ DWORD result;
+
+ success = GetExitCodeThread(thrd->handle, &result);
+ if (!success) {
+ (void) CloseHandle(thrd->handle);
+ return thrd_error;
+ }
+
+ *res = (int) result;
+ }
+
+ success = CloseHandle(thrd->handle);
+ if (!success)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+struct pt_mutex {
+ CRITICAL_SECTION cs;
+};
+typedef struct pt_mutex mtx_t;
+
+enum {
+ mtx_plain
+};
+
+static inline int mtx_init(mtx_t *mtx, int type)
+{
+ if (!mtx || type != mtx_plain)
+ return thrd_error;
+
+ InitializeCriticalSection(&mtx->cs);
+
+ return thrd_success;
+}
+
+static inline void mtx_destroy(mtx_t *mtx)
+{
+ if (mtx)
+ DeleteCriticalSection(&mtx->cs);
+}
+
+static inline int mtx_lock(mtx_t *mtx)
+{
+ if (!mtx)
+ return thrd_error;
+
+ EnterCriticalSection(&mtx->cs);
+
+ return thrd_success;
+}
+
+static inline int mtx_unlock(mtx_t *mtx)
+{
+ if (!mtx)
+ return thrd_error;
+
+ LeaveCriticalSection(&mtx->cs);
+
+ return thrd_success;
+}
+
+
+struct pt_cond {
+ CONDITION_VARIABLE cond;
+};
+typedef struct pt_cond cnd_t;
+
+static inline int cnd_init(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ InitializeConditionVariable(&cnd->cond);
+
+ return thrd_success;
+}
+
+static inline int cnd_destroy(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ /* Nothing to do. */
+
+ return thrd_success;
+}
+
+static inline int cnd_signal(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ WakeConditionVariable(&cnd->cond);
+
+ return thrd_success;
+}
+
+static inline int cnd_broadcast(cnd_t *cnd)
+{
+ if (!cnd)
+ return thrd_error;
+
+ WakeAllConditionVariable(&cnd->cond);
+
+ return thrd_success;
+}
+
+static inline int cnd_wait(cnd_t *cnd, mtx_t *mtx)
+{
+ BOOL success;
+
+ if (!cnd || !mtx)
+ return thrd_error;
+
+ success = SleepConditionVariableCS(&cnd->cond, &mtx->cs, INFINITE);
+ if (!success)
+ return thrd_error;
+
+ return thrd_success;
+}
+
+#endif /* THREADS_H */
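Both threads.h shims expose the same condition-variable subset, so the canonical predicate-wait loop works unchanged on either platform. A sketch (an illustration, not part of the import):

static int wait_for_flag(cnd_t *cnd, mtx_t *mtx, const int *flag)
{
	if (mtx_lock(mtx) != thrd_success)
		return thrd_error;

	/* Re-check the predicate after every wakeup; cnd_wait atomically
	 * releases and re-acquires the mutex.
	 */
	while (!*flag) {
		if (cnd_wait(cnd, mtx) != thrd_success) {
			(void) mtx_unlock(mtx);
			return thrd_error;
		}
	}

	return mtx_unlock(mtx);
}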
diff --git a/contrib/processor-trace/libipt/CMakeLists.txt b/contrib/processor-trace/libipt/CMakeLists.txt
new file mode 100644
index 0000000000000..726bdfe0c8692
--- /dev/null
+++ b/contrib/processor-trace/libipt/CMakeLists.txt
@@ -0,0 +1,172 @@
+# Copyright (c) 2013-2018, Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Intel Corporation nor the names of its contributors
+# may be used to endorse or promote products derived from this software
+# without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+include_directories(
+ internal/include
+)
+
+set(LIBIPT_SECTION_FILES
+ src/pt_section.c
+ src/pt_section_file.c
+)
+
+set(LIBIPT_FILES
+ src/pt_error.c
+ src/pt_packet_decoder.c
+ src/pt_query_decoder.c
+ src/pt_encoder.c
+ src/pt_sync.c
+ src/pt_version.c
+ src/pt_last_ip.c
+ src/pt_tnt_cache.c
+ src/pt_ild.c
+ src/pt_image.c
+ src/pt_image_section_cache.c
+ src/pt_retstack.c
+ src/pt_insn_decoder.c
+ src/pt_time.c
+ src/pt_asid.c
+ src/pt_event_queue.c
+ src/pt_packet.c
+ src/pt_decoder_function.c
+ src/pt_config.c
+ src/pt_insn.c
+ src/pt_block_decoder.c
+ src/pt_block_cache.c
+ src/pt_msec_cache.c
+)
+
+if (CMAKE_HOST_UNIX)
+ include_directories(
+ internal/include/posix
+ )
+
+ set(LIBIPT_FILES ${LIBIPT_FILES} src/posix/init.c)
+ set(LIBIPT_SECTION_FILES ${LIBIPT_SECTION_FILES} src/posix/pt_section_posix.c)
+endif (CMAKE_HOST_UNIX)
+
+if (CMAKE_HOST_WIN32)
+ add_definitions(
+ # export libipt symbols
+ #
+ /Dpt_export=__declspec\(dllexport\)
+ )
+
+ include_directories(
+ internal/include/windows
+ )
+
+ set(LIBIPT_FILES ${LIBIPT_FILES} src/windows/init.c)
+ set(LIBIPT_SECTION_FILES ${LIBIPT_SECTION_FILES} src/windows/pt_section_windows.c)
+endif (CMAKE_HOST_WIN32)
+
+set(LIBIPT_FILES ${LIBIPT_FILES} ${LIBIPT_SECTION_FILES})
+
+add_library(libipt SHARED
+ ${LIBIPT_FILES}
+)
+
+# put the version into the intel-pt header
+#
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/include/intel-pt.h.in
+ ${CMAKE_CURRENT_BINARY_DIR}/include/intel-pt.h
+)
+
+set_target_properties(libipt PROPERTIES
+ PREFIX ""
+ PUBLIC_HEADER ${CMAKE_CURRENT_BINARY_DIR}/include/intel-pt.h
+ VERSION ${PT_VERSION}
+ SOVERSION ${PT_VERSION_MAJOR}
+)
+
+install(TARGETS libipt
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
+ PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
+)
+
+
+function(add_ptunit_std_test name)
+ add_ptunit_c_test(${name} src/pt_${name}.c ${ARGN})
+endfunction(add_ptunit_std_test)
+
+
+add_ptunit_std_test(last_ip)
+add_ptunit_std_test(tnt_cache)
+add_ptunit_std_test(retstack)
+add_ptunit_std_test(ild)
+add_ptunit_std_test(cpu)
+add_ptunit_std_test(time)
+add_ptunit_std_test(asid)
+add_ptunit_std_test(event_queue)
+add_ptunit_std_test(image src/pt_asid.c)
+add_ptunit_std_test(sync src/pt_packet.c)
+add_ptunit_std_test(config)
+add_ptunit_std_test(image_section_cache)
+add_ptunit_std_test(block_cache)
+add_ptunit_std_test(msec_cache)
+
+add_ptunit_c_test(mapped_section src/pt_asid.c)
+add_ptunit_c_test(query
+ src/pt_encoder.c
+ src/pt_last_ip.c
+ src/pt_packet_decoder.c
+ src/pt_sync.c
+ src/pt_tnt_cache.c
+ src/pt_time.c
+ src/pt_event_queue.c
+ src/pt_query_decoder.c
+ src/pt_packet.c
+ src/pt_decoder_function.c
+ src/pt_packet_decoder.c
+ src/pt_config.c
+ src/pt_time.c
+ src/pt_block_cache.c
+)
+add_ptunit_c_test(section ${LIBIPT_SECTION_FILES})
+add_ptunit_c_test(section-file
+ test/src/ptunit-section.c
+ src/pt_section.c
+ src/pt_section_file.c
+)
+add_ptunit_c_test(packet
+ src/pt_encoder.c
+ src/pt_packet_decoder.c
+ src/pt_sync.c
+ src/pt_packet.c
+ src/pt_decoder_function.c
+ src/pt_config.c
+)
+add_ptunit_c_test(fetch
+ src/pt_decoder_function.c
+ src/pt_encoder.c
+ src/pt_config.c
+)
+
+add_ptunit_cpp_test(cpp)
+add_ptunit_libraries(cpp libipt)
diff --git a/contrib/processor-trace/libipt/include/intel-pt.h b/contrib/processor-trace/libipt/include/intel-pt.h
new file mode 100755
index 0000000000000..de1c6275c8063
--- /dev/null
+++ b/contrib/processor-trace/libipt/include/intel-pt.h
@@ -0,0 +1,2463 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INTEL_PT_H
+#define INTEL_PT_H
+
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Intel(R) Processor Trace (Intel PT) decoder library.
+ *
+ * This file is logically structured into the following sections:
+ *
+ * - Version
+ * - Errors
+ * - Configuration
+ * - Packet encoder / decoder
+ * - Query decoder
+ * - Traced image
+ * - Instruction flow decoder
+ * - Block decoder
+ */
+
+
+
+struct pt_encoder;
+struct pt_packet_decoder;
+struct pt_query_decoder;
+struct pt_insn_decoder;
+struct pt_block_decoder;
+
+
+
+/* A macro to mark functions as exported. */
+#ifndef pt_export
+# if defined(__GNUC__)
+# define pt_export __attribute__((visibility("default")))
+# elif defined(_MSC_VER)
+# define pt_export __declspec(dllimport)
+# else
+# error "unknown compiler"
+# endif
+#endif
+
+
+
+/* Version. */
+
+
+/** The header version. */
+#define LIBIPT_VERSION_MAJOR ${PT_VERSION_MAJOR}
+#define LIBIPT_VERSION_MINOR ${PT_VERSION_MINOR}
+
+#define LIBIPT_VERSION ((LIBIPT_VERSION_MAJOR << 8) + LIBIPT_VERSION_MINOR)
+
+
+/** The library version. */
+struct pt_version {
+ /** Major version number. */
+ uint8_t major;
+
+ /** Minor version number. */
+ uint8_t minor;
+
+ /** Reserved bits. */
+ uint16_t reserved;
+
+ /** Build number. */
+ uint32_t build;
+
+ /** Version extension. */
+ const char *ext;
+};
+
+
+/** Return the library version. */
+extern pt_export struct pt_version pt_library_version(void);
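/*
 * Illustration, not part of the imported header: printing the library
 * version via pt_library_version() (assumes <stdio.h>).
 */
static void print_pt_version(void)
{
	struct pt_version version = pt_library_version();

	printf("libipt %u.%u.%u%s\n", (unsigned) version.major,
	       (unsigned) version.minor, (unsigned) version.build,
	       version.ext ? version.ext : "");
}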
+
+
+
+/* Errors. */
+
+
+
+/** Error codes. */
+enum pt_error_code {
+ /* No error. Everything is OK. */
+ pte_ok,
+
+ /* Internal decoder error. */
+ pte_internal,
+
+ /* Invalid argument. */
+ pte_invalid,
+
+ /* Decoder out of sync. */
+ pte_nosync,
+
+ /* Unknown opcode. */
+ pte_bad_opc,
+
+ /* Unknown payload. */
+ pte_bad_packet,
+
+ /* Unexpected packet context. */
+ pte_bad_context,
+
+ /* Decoder reached end of trace stream. */
+ pte_eos,
+
+ /* No packet matching the query could be found. */
+ pte_bad_query,
+
+ /* Decoder out of memory. */
+ pte_nomem,
+
+ /* Bad configuration. */
+ pte_bad_config,
+
+ /* There is no IP. */
+ pte_noip,
+
+ /* The IP has been suppressed. */
+ pte_ip_suppressed,
+
+ /* There is no memory mapped at the requested address. */
+ pte_nomap,
+
+ /* An instruction could not be decoded. */
+ pte_bad_insn,
+
+ /* No wall-clock time is available. */
+ pte_no_time,
+
+ /* No core:bus ratio available. */
+ pte_no_cbr,
+
+ /* Bad traced image. */
+ pte_bad_image,
+
+ /* A locking error. */
+ pte_bad_lock,
+
+ /* The requested feature is not supported. */
+ pte_not_supported,
+
+ /* The return address stack is empty. */
+ pte_retstack_empty,
+
+ /* A compressed return is not indicated correctly by a taken branch. */
+ pte_bad_retcomp,
+
+ /* The current decoder state does not match the state in the trace. */
+ pte_bad_status_update,
+
+ /* The trace did not contain an expected enabled event. */
+ pte_no_enable,
+
+ /* An event was ignored. */
+ pte_event_ignored,
+
+ /* Something overflowed. */
+ pte_overflow,
+
+ /* A file handling error. */
+ pte_bad_file,
+
+ /* Unknown cpu. */
+ pte_bad_cpu
+};
+
+
+/** Decode a function return value into a pt_error_code. */
+static inline enum pt_error_code pt_errcode(int status)
+{
+ return (status >= 0) ? pte_ok : (enum pt_error_code) -status;
+}
+
+/** Return a human readable error string. */
+extern pt_export const char *pt_errstr(enum pt_error_code);
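/*
 * Illustration, not part of the imported header: the usual pattern for
 * reporting the library's negative status returns, combining pt_errcode()
 * and pt_errstr() declared above (assumes <stdio.h>).
 */
static int report_pt_status(int status)
{
	if (status < 0)
		fprintf(stderr, "pt error: %s\n",
			pt_errstr(pt_errcode(status)));

	return status;
}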
+
+
+
+/* Configuration. */
+
+
+
+/** A cpu vendor. */
+enum pt_cpu_vendor {
+ pcv_unknown,
+ pcv_intel
+};
+
+/** A cpu identifier. */
+struct pt_cpu {
+ /** The cpu vendor. */
+ enum pt_cpu_vendor vendor;
+
+ /** The cpu family. */
+ uint16_t family;
+
+ /** The cpu model. */
+ uint8_t model;
+
+ /** The stepping. */
+ uint8_t stepping;
+};
+
+/** A collection of Intel PT errata. */
+struct pt_errata {
+ /** BDM70: Intel(R) Processor Trace PSB+ Packets May Contain
+ * Unexpected Packets.
+ *
+ * Same as: SKD024, SKL021, KBL021.
+ *
+ * Some Intel Processor Trace packets should be issued only between
+ * TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE
+ * packet is generated it may be preceded by a PSB+ that incorrectly
+ * includes FUP and MODE.Exec packets.
+ */
+ uint32_t bdm70:1;
+
+ /** BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be
+ * Recorded Following a Transactional Abort.
+ *
+ * Use of Intel(R) Transactional Synchronization Extensions (Intel(R)
+ * TSX) may result in a transactional abort. If an abort occurs
+ * immediately following a branch instruction, an incorrect branch
+ * target may be logged in an LBR (Last Branch Record) or in an Intel(R)
+ * Processor Trace (Intel(R) PT) packet before the LBR or Intel PT
+ * packet produced by the abort.
+ */
+ uint32_t bdm64:1;
+
+ /** SKD007: Intel(R) PT Buffer Overflow May Result in Incorrect Packets.
+ *
+ * Same as: SKL049, KBL041.
+ *
+ * Under complex micro-architectural conditions, an Intel PT (Processor
+ * Trace) OVF (Overflow) packet may be issued after the first byte of a
+ * multi-byte CYC (Cycle Count) packet, instead of any remaining bytes
+ * of the CYC.
+ */
+ uint32_t skd007:1;
+
+ /** SKD022: VM Entry That Clears TraceEn May Generate a FUP.
+ *
+ * Same as: SKL024, KBL023.
+ *
+ * If VM entry clears Intel(R) PT (Intel Processor Trace)
+ * IA32_RTIT_CTL.TraceEn (MSR 570H, bit 0) while PacketEn is 1 then a
+ * FUP (Flow Update Packet) will precede the TIP.PGD (Target IP Packet,
+ * Packet Generation Disable). VM entry can clear TraceEn if the
+ * VM-entry MSR-load area includes an entry for the IA32_RTIT_CTL MSR.
+ */
+ uint32_t skd022:1;
+
+ /** SKD010: Intel(R) PT FUP May be Dropped After OVF.
+ *
+ * Same as: SKD014, SKL033, KBL030.
+ *
+ * Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+ * be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+ * Packet, Packet Generation Enable).
+ */
+ uint32_t skd010:1;
+
+ /** SKL014: Intel(R) PT TIP.PGD May Not Have Target IP Payload.
+ *
+ * Same as: KBL014.
+ *
+ * When Intel PT (Intel Processor Trace) is enabled and a direct
+ * unconditional branch clears IA32_RTIT_STATUS.FilterEn (MSR 571H, bit
+ * 0), due to this erratum, the resulting TIP.PGD (Target IP Packet,
+ * Packet Generation Disable) may not have an IP payload with the target
+ * IP.
+ */
+ uint32_t skl014:1;
+
+ /** APL12: Intel(R) PT OVF May Be Followed By An Unexpected FUP Packet.
+ *
+ * Certain Intel PT (Processor Trace) packets, including FUPs (Flow
+ * Update Packets), should be issued only between TIP.PGE (Target IP
+ * Packet - Packet Generation Enable) and TIP.PGD (Target IP Packet -
+ * Packet Generation Disable) packets. When outside a TIP.PGE/TIP.PGD
+ * pair, as a result of IA32_RTIT_STATUS.FilterEn[0] (MSR 571H) being
+ * cleared, an OVF (Overflow) packet may be unexpectedly followed by a
+ * FUP.
+ */
+ uint32_t apl12:1;
+
+ /** APL11: Intel(R) PT OVF Packet May Be Followed by a TIP.PGD Packet.
+ *
+ * If Intel PT (Processor Trace) encounters an internal buffer overflow
+ * and generates an OVF (Overflow) packet just as IA32_RTIT_CTL (MSR
+ * 570H) bit 0 (TraceEn) is cleared, or during a far transfer that
+ * causes IA32_RTIT_STATUS.ContextEn[1] (MSR 571H) to be cleared, the
+ * OVF may be followed by a TIP.PGD (Target Instruction Pointer - Packet
+ * Generation Disable) packet.
+ */
+ uint32_t apl11:1;
+
+ /* Reserve a few bytes for the future. */
+ uint32_t reserved[15];
+};
+
+/** A collection of decoder-specific configuration flags. */
+struct pt_conf_flags {
+ /** The decoder variant. */
+ union {
+ /** Flags for the block decoder. */
+ struct {
+ /** End a block after a call instruction. */
+ uint32_t end_on_call:1;
+
+ /** Enable tick events for timing updates. */
+ uint32_t enable_tick_events:1;
+
+ /** End a block after a jump instruction. */
+ uint32_t end_on_jump:1;
+ } block;
+
+ /** Flags for the instruction flow decoder. */
+ struct {
+ /** Enable tick events for timing updates. */
+ uint32_t enable_tick_events:1;
+ } insn;
+
+ /* Reserve a few bytes for future extensions. */
+ uint32_t reserved[4];
+ } variant;
+};
+
+/** The address filter configuration. */
+struct pt_conf_addr_filter {
+ /** The address filter configuration.
+ *
+ * This corresponds to the respective fields in IA32_RTIT_CTL MSR.
+ */
+ union {
+ uint64_t addr_cfg;
+
+ struct {
+ uint32_t addr0_cfg:4;
+ uint32_t addr1_cfg:4;
+ uint32_t addr2_cfg:4;
+ uint32_t addr3_cfg:4;
+ } ctl;
+ } config;
+
+ /** The address ranges configuration.
+ *
+ * This corresponds to the IA32_RTIT_ADDRn_A/B MSRs.
+ */
+ uint64_t addr0_a;
+ uint64_t addr0_b;
+ uint64_t addr1_a;
+ uint64_t addr1_b;
+ uint64_t addr2_a;
+ uint64_t addr2_b;
+ uint64_t addr3_a;
+ uint64_t addr3_b;
+
+ /* Reserve some space. */
+ uint64_t reserved[8];
+};
+
+/** An unknown packet. */
+struct pt_packet_unknown;
+
+/** An Intel PT decoder configuration.
+ */
+struct pt_config {
+ /** The size of the config structure in bytes. */
+ size_t size;
+
+ /** The trace buffer begin address. */
+ uint8_t *begin;
+
+ /** The trace buffer end address. */
+ uint8_t *end;
+
+ /** An optional callback for handling unknown packets.
+ *
+ * If \@callback is not NULL, it is called for any unknown opcode.
+ */
+ struct {
+ /** The callback function.
+ *
+ * It shall decode the packet at \@pos into \@unknown.
+ * It shall return the number of bytes read upon success.
+ * It shall return a negative pt_error_code otherwise.
+ * The below context is passed as \@context.
+ */
+ int (*callback)(struct pt_packet_unknown *unknown,
+ const struct pt_config *config,
+ const uint8_t *pos, void *context);
+
+ /** The user-defined context for this configuration. */
+ void *context;
+ } decode;
+
+ /** The cpu on which Intel PT has been recorded. */
+ struct pt_cpu cpu;
+
+ /** The errata to apply when encoding or decoding Intel PT. */
+ struct pt_errata errata;
+
+ /* The CTC frequency.
+ *
+ * This is only required if MTC packets have been enabled in
+ * IA32_RTIT_CTRL.MTCEn.
+ */
+ uint32_t cpuid_0x15_eax, cpuid_0x15_ebx;
+
+ /* The MTC frequency as defined in IA32_RTIT_CTL.MTCFreq.
+ *
+ * This is only required if MTC packets have been enabled in
+ * IA32_RTIT_CTRL.MTCEn.
+ */
+ uint8_t mtc_freq;
+
+ /* The nominal frequency as defined in MSR_PLATFORM_INFO[15:8].
+ *
+ * This is only required if CYC packets have been enabled in
+ * IA32_RTIT_CTRL.CYCEn.
+ *
+ * If zero, timing calibration will only be able to use MTC and CYC
+ * packets.
+ *
+ * If not zero, timing calibration will also be able to use CBR
+ * packets.
+ */
+ uint8_t nom_freq;
+
+ /** A collection of decoder-specific flags. */
+ struct pt_conf_flags flags;
+
+ /** The address filter configuration. */
+ struct pt_conf_addr_filter addr_filter;
+};
+
+
+/** Zero-initialize an Intel PT configuration. */
+static inline void pt_config_init(struct pt_config *config)
+{
+ memset(config, 0, sizeof(*config));
+
+ config->size = sizeof(*config);
+}
+
+/** Determine errata for a given cpu.
+ *
+ * Updates \@errata based on \@cpu.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@errata or \@cpu is NULL.
+ * Returns -pte_bad_cpu if \@cpu is not known.
+ */
+extern pt_export int pt_cpu_errata(struct pt_errata *errata,
+ const struct pt_cpu *cpu);
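/*
 * Illustration, not part of the imported header: a minimal configuration
 * sketch. The trace buffer bounds and the cpu identification are assumed
 * to be provided by the caller.
 */
static int setup_pt_config(struct pt_config *config, uint8_t *begin,
			   uint8_t *end, const struct pt_cpu *cpu)
{
	if (!config || !cpu)
		return -pte_invalid;

	pt_config_init(config);

	config->begin = begin;
	config->end = end;
	config->cpu = *cpu;

	/* Enable the workarounds for the recording cpu's errata. */
	return pt_cpu_errata(&config->errata, &config->cpu);
}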
+
+
+
+/* Packet encoder / decoder. */
+
+
+
+/** Intel PT packet types. */
+enum pt_packet_type {
+ /* An invalid packet. */
+ ppt_invalid,
+
+ /* A packet decodable by the optional decoder callback. */
+ ppt_unknown,
+
+ /* Actual packets supported by this library. */
+ ppt_pad,
+ ppt_psb,
+ ppt_psbend,
+ ppt_fup,
+ ppt_tip,
+ ppt_tip_pge,
+ ppt_tip_pgd,
+ ppt_tnt_8,
+ ppt_tnt_64,
+ ppt_mode,
+ ppt_pip,
+ ppt_vmcs,
+ ppt_cbr,
+ ppt_tsc,
+ ppt_tma,
+ ppt_mtc,
+ ppt_cyc,
+ ppt_stop,
+ ppt_ovf,
+ ppt_mnt,
+ ppt_exstop,
+ ppt_mwait,
+ ppt_pwre,
+ ppt_pwrx,
+ ppt_ptw
+};
+
+/** The IP compression. */
+enum pt_ip_compression {
+ /* The bits encode the payload size and the encoding scheme.
+ *
+ * No payload. The IP has been suppressed.
+ */
+ pt_ipc_suppressed = 0x0,
+
+ /* Payload: 16 bits. Update last IP. */
+ pt_ipc_update_16 = 0x01,
+
+ /* Payload: 32 bits. Update last IP. */
+ pt_ipc_update_32 = 0x02,
+
+ /* Payload: 48 bits. Sign extend to full address. */
+ pt_ipc_sext_48 = 0x03,
+
+ /* Payload: 48 bits. Update last IP. */
+ pt_ipc_update_48 = 0x04,
+
+ /* Payload: 64 bits. Full address. */
+ pt_ipc_full = 0x06
+};
+
+/** An execution mode. */
+enum pt_exec_mode {
+ ptem_unknown,
+ ptem_16bit,
+ ptem_32bit,
+ ptem_64bit
+};
+
+/** Mode packet leaves. */
+enum pt_mode_leaf {
+ pt_mol_exec = 0x00,
+ pt_mol_tsx = 0x20
+};
+
+/** A TNT-8 or TNT-64 packet. */
+struct pt_packet_tnt {
+ /** TNT payload bit size. */
+ uint8_t bit_size;
+
+ /** TNT payload excluding stop bit. */
+ uint64_t payload;
+};
+
+/** A packet with IP payload. */
+struct pt_packet_ip {
+ /** IP compression. */
+ enum pt_ip_compression ipc;
+
+ /** Zero-extended payload ip. */
+ uint64_t ip;
+};
+
+/** A mode.exec packet. */
+struct pt_packet_mode_exec {
+ /** The mode.exec csl bit. */
+ uint32_t csl:1;
+
+ /** The mode.exec csd bit. */
+ uint32_t csd:1;
+};
+
+static inline enum pt_exec_mode
+pt_get_exec_mode(const struct pt_packet_mode_exec *packet)
+{
+ if (packet->csl)
+ return packet->csd ? ptem_unknown : ptem_64bit;
+ else
+ return packet->csd ? ptem_32bit : ptem_16bit;
+}
+
+static inline struct pt_packet_mode_exec
+pt_set_exec_mode(enum pt_exec_mode mode)
+{
+ struct pt_packet_mode_exec packet;
+
+ switch (mode) {
+ default:
+ packet.csl = 1;
+ packet.csd = 1;
+ break;
+
+ case ptem_64bit:
+ packet.csl = 1;
+ packet.csd = 0;
+ break;
+
+ case ptem_32bit:
+ packet.csl = 0;
+ packet.csd = 1;
+ break;
+
+ case ptem_16bit:
+ packet.csl = 0;
+ packet.csd = 0;
+ break;
+ }
+
+ return packet;
+}
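/*
 * Illustration, not part of the imported header: the two helpers above
 * round-trip an execution mode through the mode.exec bits.
 */
static int exec_mode_roundtrip(void)
{
	struct pt_packet_mode_exec exec = pt_set_exec_mode(ptem_64bit);

	return pt_get_exec_mode(&exec) == ptem_64bit; /* evaluates to 1 */
}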
+
+/** A mode.tsx packet. */
+struct pt_packet_mode_tsx {
+ /** The mode.tsx intx bit. */
+ uint32_t intx:1;
+
+ /** The mode.tsx abrt bit. */
+ uint32_t abrt:1;
+};
+
+/** A mode packet. */
+struct pt_packet_mode {
+ /** Mode leaf. */
+ enum pt_mode_leaf leaf;
+
+ /** Mode bits. */
+ union {
+ /** Packet: mode.exec. */
+ struct pt_packet_mode_exec exec;
+
+ /** Packet: mode.tsx. */
+ struct pt_packet_mode_tsx tsx;
+ } bits;
+};
+
+/** A PIP packet. */
+struct pt_packet_pip {
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The non-root bit. */
+ uint32_t nr:1;
+};
+
+/** A TSC packet. */
+struct pt_packet_tsc {
+ /** The TSC value. */
+ uint64_t tsc;
+};
+
+/** A CBR packet. */
+struct pt_packet_cbr {
+ /** The core/bus cycle ratio. */
+ uint8_t ratio;
+};
+
+/** A TMA packet. */
+struct pt_packet_tma {
+ /** The crystal clock tick counter value. */
+ uint16_t ctc;
+
+ /** The fast counter value. */
+ uint16_t fc;
+};
+
+/** An MTC packet. */
+struct pt_packet_mtc {
+ /** The crystal clock tick counter value. */
+ uint8_t ctc;
+};
+
+/** A CYC packet. */
+struct pt_packet_cyc {
+ /** The cycle counter value. */
+ uint64_t value;
+};
+
+/** A VMCS packet. */
+struct pt_packet_vmcs {
+ /* The VMCS Base Address (i.e. the shifted payload). */
+ uint64_t base;
+};
+
+/** An MNT packet. */
+struct pt_packet_mnt {
+ /** The raw payload. */
+ uint64_t payload;
+};
+
+/** An EXSTOP packet. */
+struct pt_packet_exstop {
+ /** A flag specifying the binding of the packet:
+ *
+ * set: binds to the next FUP.
+ * clear: standalone.
+ */
+ uint32_t ip:1;
+};
+
+/** An MWAIT packet. */
+struct pt_packet_mwait {
+ /** The MWAIT hints (EAX). */
+ uint32_t hints;
+
+ /** The MWAIT extensions (ECX). */
+ uint32_t ext;
+};
+
+/** A PWRE packet. */
+struct pt_packet_pwre {
+ /** The resolved thread C-state. */
+ uint8_t state;
+
+ /** The resolved thread sub C-state. */
+ uint8_t sub_state;
+
+ /** A flag indicating whether the C-state entry was initiated by h/w. */
+ uint32_t hw:1;
+};
+
+/** A PWRX packet. */
+struct pt_packet_pwrx {
+ /** The core C-state at the time of the wake. */
+ uint8_t last;
+
+ /** The deepest core C-state achieved during sleep. */
+ uint8_t deepest;
+
+ /** The wake reason:
+ *
+ * - due to external interrupt received.
+ */
+ uint32_t interrupt:1;
+
+ /** - due to store to monitored address. */
+ uint32_t store:1;
+
+ /** - due to h/w autonomous condition such as HDC. */
+ uint32_t autonomous:1;
+};
+
+/** A PTW packet. */
+struct pt_packet_ptw {
+ /** The raw payload. */
+ uint64_t payload;
+
+ /** The payload size as encoded in the packet. */
+ uint8_t plc;
+
+ /** A flag saying whether a FUP follows this PTW packet, providing
+ * the IP of the corresponding PTWRITE instruction.
+ */
+ uint32_t ip:1;
+};
+
+static inline int pt_ptw_size(uint8_t plc)
+{
+ switch (plc) {
+ case 0:
+ return 4;
+
+ case 1:
+ return 8;
+
+ case 2:
+ case 3:
+ return -pte_bad_packet;
+ }
+
+ return -pte_internal;
+}
+
+/** An unknown packet decodable by the optional decoder callback. */
+struct pt_packet_unknown {
+ /** Pointer to the raw packet bytes. */
+ const uint8_t *packet;
+
+ /** Optional pointer to a user-defined structure. */
+ void *priv;
+};
+
+/** An Intel PT packet. */
+struct pt_packet {
+ /** The type of the packet.
+ *
+ * This also determines the \@payload field.
+ */
+ enum pt_packet_type type;
+
+ /** The size of the packet including opcode and payload. */
+ uint8_t size;
+
+ /** Packet specific data. */
+ union {
+ /** Packets: pad, ovf, psb, psbend, stop - no payload. */
+
+ /** Packet: tnt-8, tnt-64. */
+ struct pt_packet_tnt tnt;
+
+ /** Packet: tip, fup, tip.pge, tip.pgd. */
+ struct pt_packet_ip ip;
+
+ /** Packet: mode. */
+ struct pt_packet_mode mode;
+
+ /** Packet: pip. */
+ struct pt_packet_pip pip;
+
+ /** Packet: tsc. */
+ struct pt_packet_tsc tsc;
+
+ /** Packet: cbr. */
+ struct pt_packet_cbr cbr;
+
+ /** Packet: tma. */
+ struct pt_packet_tma tma;
+
+ /** Packet: mtc. */
+ struct pt_packet_mtc mtc;
+
+ /** Packet: cyc. */
+ struct pt_packet_cyc cyc;
+
+ /** Packet: vmcs. */
+ struct pt_packet_vmcs vmcs;
+
+ /** Packet: mnt. */
+ struct pt_packet_mnt mnt;
+
+ /** Packet: exstop. */
+ struct pt_packet_exstop exstop;
+
+ /** Packet: mwait. */
+ struct pt_packet_mwait mwait;
+
+ /** Packet: pwre. */
+ struct pt_packet_pwre pwre;
+
+ /** Packet: pwrx. */
+ struct pt_packet_pwrx pwrx;
+
+ /** Packet: ptw. */
+ struct pt_packet_ptw ptw;
+
+ /** Packet: unknown. */
+ struct pt_packet_unknown unknown;
+ } payload;
+};
+
+
+
+/* Packet encoder. */
+
+
+
+/** Allocate an Intel PT packet encoder.
+ *
+ * The encoder will work on the buffer defined in \@config; the buffer shall
+ * contain raw trace data and remain valid for the lifetime of the encoder.
+ *
+ * The encoder starts at the beginning of the trace buffer.
+ */
+extern pt_export struct pt_encoder *
+pt_alloc_encoder(const struct pt_config *config);
+
+/** Free an Intel PT packet encoder.
+ *
+ * The \@encoder must not be used after a successful return.
+ */
+extern pt_export void pt_free_encoder(struct pt_encoder *encoder);
+
+/** Hard set synchronization point of an Intel PT packet encoder.
+ *
+ * Synchronize \@encoder to \@offset within the trace buffer.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if the given offset is behind the end of the trace buffer.
+ * Returns -pte_invalid if \@encoder is NULL.
+ */
+extern pt_export int pt_enc_sync_set(struct pt_encoder *encoder,
+ uint64_t offset);
+
+/** Get the current packet encoder position.
+ *
+ * Fills the current \@encoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@encoder or \@offset is NULL.
+ */
+extern pt_export int pt_enc_get_offset(const struct pt_encoder *encoder,
+ uint64_t *offset);
+
+/* Return a pointer to \@encoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@encoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_enc_get_config(const struct pt_encoder *encoder);
+
+/** Encode an Intel PT packet.
+ *
+ * Writes \@packet at \@encoder's current position in the Intel PT buffer and
+ * advances the \@encoder beyond the written packet.
+ *
+ * The \@packet.size field is ignored.
+ *
+ * In case of errors, the \@encoder is not advanced and nothing is written
+ * into the Intel PT buffer.
+ *
+ * Returns the number of bytes written on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_bad_opc if \@packet.type is not known.
+ * Returns -pte_bad_packet if \@packet's payload is invalid.
+ * Returns -pte_eos if \@encoder reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@encoder or \@packet is NULL.
+ */
+extern pt_export int pt_enc_next(struct pt_encoder *encoder,
+ const struct pt_packet *packet);
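/*
 * Illustration, not part of the imported header: encoding a single TSC
 * packet into the trace buffer described by config.
 */
static int encode_tsc(const struct pt_config *config, uint64_t tsc)
{
	struct pt_encoder *encoder;
	struct pt_packet packet;
	int bytes;

	encoder = pt_alloc_encoder(config);
	if (!encoder)
		return -pte_nomem; /* allocation or configuration failed */

	packet.type = ppt_tsc;
	packet.payload.tsc.tsc = tsc;

	/* The packet.size field is ignored by pt_enc_next(). */
	bytes = pt_enc_next(encoder, &packet);

	pt_free_encoder(encoder);
	return bytes; /* bytes written, or a negative pt_error_code */
}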
+
+
+
+/* Packet decoder. */
+
+
+
+/** Allocate an Intel PT packet decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; the buffer shall
+ * contain raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_packet_decoder *
+pt_pkt_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT packet decoder.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_pkt_free_decoder(struct pt_packet_decoder *decoder);
+
+/** Synchronize an Intel PT packet decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_pkt_sync_forward(struct pt_packet_decoder *decoder);
+extern pt_export int pt_pkt_sync_backward(struct pt_packet_decoder *decoder);
+
+/** Hard set synchronization point of an Intel PT decoder.
+ *
+ * Synchronize \@decoder to \@offset within the trace buffer.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if the given offset is behind the end of the trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_pkt_sync_set(struct pt_packet_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_pkt_get_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * This is useful when splitting a trace stream for parallel decoding.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_pkt_get_sync_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset);
+
+/* Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_pkt_get_config(const struct pt_packet_decoder *decoder);
+
+/** Decode the next packet and advance the decoder.
+ *
+ * Decodes the packet at \@decoder's current position into \@packet and
+ * adjusts the \@decoder's position by the number of bytes the packet had
+ * consumed.
+ *
+ * The \@size argument must be set to sizeof(struct pt_packet).
+ *
+ * Returns the number of bytes consumed on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_bad_opc if the packet is unknown.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@decoder reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@packet is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_pkt_next(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet, size_t size);
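/*
 * Illustration, not part of the imported header: a minimal packet decode
 * loop that synchronizes forward once and reads until end of stream.
 */
static int walk_packets(const struct pt_config *config)
{
	struct pt_packet_decoder *decoder;
	int status;

	decoder = pt_pkt_alloc_decoder(config);
	if (!decoder)
		return -pte_nomem;

	status = pt_pkt_sync_forward(decoder);
	while (status >= 0) {
		struct pt_packet packet;

		status = pt_pkt_next(decoder, &packet, sizeof(packet));
		/* Inspect packet here; status is the bytes consumed. */
	}

	pt_pkt_free_decoder(decoder);
	return (status == -pte_eos) ? 0 : status;
}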
+
+
+
+/* Query decoder. */
+
+
+
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+
+/** Event types. */
+enum pt_event_type {
+ /* Tracing has been enabled/disabled. */
+ ptev_enabled,
+ ptev_disabled,
+
+ /* Tracing has been disabled asynchronously. */
+ ptev_async_disabled,
+
+ /* An asynchronous branch, e.g. interrupt. */
+ ptev_async_branch,
+
+ /* A synchronous paging event. */
+ ptev_paging,
+
+ /* An asynchronous paging event. */
+ ptev_async_paging,
+
+ /* Trace overflow. */
+ ptev_overflow,
+
+ /* An execution mode change. */
+ ptev_exec_mode,
+
+ /* A transactional execution state change. */
+ ptev_tsx,
+
+ /* Trace Stop. */
+ ptev_stop,
+
+ /* A synchronous vmcs event. */
+ ptev_vmcs,
+
+ /* An asynchronous vmcs event. */
+ ptev_async_vmcs,
+
+ /* Execution has stopped. */
+ ptev_exstop,
+
+ /* An MWAIT operation completed. */
+ ptev_mwait,
+
+ /* A power state was entered. */
+ ptev_pwre,
+
+ /* A power state was exited. */
+ ptev_pwrx,
+
+ /* A PTWRITE event. */
+ ptev_ptwrite,
+
+ /* A timing event. */
+ ptev_tick,
+
+ /* A core:bus ratio event. */
+ ptev_cbr,
+
+ /* A maintenance event. */
+ ptev_mnt
+};
+
+/** An event. */
+struct pt_event {
+ /** The type of the event. */
+ enum pt_event_type type;
+
+ /** A flag indicating that the event IP has been suppressed. */
+ uint32_t ip_suppressed:1;
+
+ /** A flag indicating that the event is for status update. */
+ uint32_t status_update:1;
+
+ /** A flag indicating that the event has timing information. */
+ uint32_t has_tsc:1;
+
+ /** The time stamp count of the event.
+ *
+ * This field is only valid if \@has_tsc is set.
+ */
+ uint64_t tsc;
+
+ /** The number of lost mtc and cyc packets.
+ *
+ * This gives an idea about the quality of the \@tsc. The more packets
+ * were dropped, the less precise the timing is.
+ */
+ uint32_t lost_mtc;
+ uint32_t lost_cyc;
+
+ /* Reserved space for future extensions. */
+ uint64_t reserved[2];
+
+ /** Event specific data. */
+ union {
+ /** Event: enabled. */
+ struct {
+ /** The address at which tracing resumes. */
+ uint64_t ip;
+
+ /** A flag indicating that tracing resumes from the IP
+ * at which tracing had been disabled before.
+ */
+ uint32_t resumed:1;
+ } enabled;
+
+ /** Event: disabled. */
+ struct {
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /* The exact source ip needs to be determined using
+ * disassembly and the filter configuration.
+ */
+ } disabled;
+
+ /** Event: async disabled. */
+ struct {
+ /** The source address of the asynchronous branch that
+ * disabled tracing.
+ */
+ uint64_t at;
+
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } async_disabled;
+
+ /** Event: async branch. */
+ struct {
+ /** The branch source address. */
+ uint64_t from;
+
+ /** The branch destination address.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t to;
+ } async_branch;
+
+ /** Event: paging. */
+ struct {
+ /** The updated CR3 value.
+ *
+ * The lower 5 bits have been zeroed out.
+ * The upper bits have been zeroed out depending on the
+ * maximum possible address.
+ */
+ uint64_t cr3;
+
+ /** A flag indicating whether the cpu is operating in
+ * vmx non-root (guest) mode.
+ */
+ uint32_t non_root:1;
+
+ /* The address at which the event is effective is
+ * obvious from the disassembly.
+ */
+ } paging;
+
+ /** Event: async paging. */
+ struct {
+ /** The updated CR3 value.
+ *
+ * The lower 5 bits have been zeroed out.
+ * The upper bits have been zeroed out depending on the
+ * maximum possible address.
+ */
+ uint64_t cr3;
+
+ /** A flag indicating whether the cpu is operating in
+ * vmx non-root (guest) mode.
+ */
+ uint32_t non_root:1;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+ } async_paging;
+
+ /** Event: overflow. */
+ struct {
+ /** The address at which tracing resumes after overflow.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ * In this case, the overflow resolved while tracing
+ * was disabled.
+ */
+ uint64_t ip;
+ } overflow;
+
+ /** Event: exec mode. */
+ struct {
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+ } exec_mode;
+
+ /** Event: tsx. */
+ struct {
+ /** The address at which the event is effective.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /** A flag indicating speculative execution mode. */
+ uint32_t speculative:1;
+
+ /** A flag indicating speculative execution aborts. */
+ uint32_t aborted:1;
+ } tsx;
+
+ /** Event: vmcs. */
+ struct {
+ /** The VMCS base address.
+ *
+ * The address is zero-extended with the lower 12 bits
+ * all zero.
+ */
+ uint64_t base;
+
+ /* The new VMCS base address should be stored and
+ * applied on subsequent VM entries.
+ */
+ } vmcs;
+
+ /** Event: async vmcs. */
+ struct {
+ /** The VMCS base address.
+ *
+ * The address is zero-extended with the lower 12 bits
+ * all zero.
+ */
+ uint64_t base;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+
+ /* An async paging event that binds to the same IP
+ * will always succeed this async vmcs event.
+ */
+ } async_vmcs;
+
+ /** Event: execution stopped. */
+ struct {
+ /** The address at which execution has stopped. This is
+ * the address of the last instruction that did not
+ * complete.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } exstop;
+
+ /** Event: mwait. */
+ struct {
+ /** The address of the instruction causing the mwait.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /** The mwait hints (eax).
+ *
+ * Reserved bits are undefined.
+ */
+ uint32_t hints;
+
+ /** The mwait extensions (ecx).
+ *
+ * Reserved bits are undefined.
+ */
+ uint32_t ext;
+ } mwait;
+
+ /** Event: power state entry. */
+ struct {
+ /** The resolved thread C-state. */
+ uint8_t state;
+
+ /** The resolved thread sub C-state. */
+ uint8_t sub_state;
+
+ /** A flag indicating whether the C-state entry was
+ * initiated by h/w.
+ */
+ uint32_t hw:1;
+ } pwre;
+
+ /** Event: power state exit. */
+ struct {
+ /** The core C-state at the time of the wake. */
+ uint8_t last;
+
+ /** The deepest core C-state achieved during sleep. */
+ uint8_t deepest;
+
+ /** The wake reason:
+ *
+ * - due to external interrupt received.
+ */
+ uint32_t interrupt:1;
+
+ /** - due to store to monitored address. */
+ uint32_t store:1;
+
+ /** - due to h/w autonomous condition such as HDC. */
+ uint32_t autonomous:1;
+ } pwrx;
+
+ /** Event: ptwrite. */
+ struct {
+ /** The address of the ptwrite instruction.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ *
+ * In this case, the address is obvious from the
+ * disassembly.
+ */
+ uint64_t ip;
+
+ /** The size of the below \@payload in bytes. */
+ uint8_t size;
+
+ /** The ptwrite payload. */
+ uint64_t payload;
+ } ptwrite;
+
+ /** Event: tick. */
+ struct {
+ /** The instruction address near which the tick occurred.
+ *
+ * A timestamp can sometimes be attributed directly to
+ * an instruction (e.g. to an indirect branch that
+ * receives CYC + TIP) and sometimes not (e.g. MTC).
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } tick;
+
+ /** Event: cbr. */
+ struct {
+ /** The core:bus ratio. */
+ uint16_t ratio;
+ } cbr;
+
+ /** Event: mnt. */
+ struct {
+ /** The raw payload. */
+ uint64_t payload;
+ } mnt;
+ } variant;
+};
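+
+/* Example (editor's sketch): a minimal handler for a few of the above event
+ * types.  The function name is illustrative; <stdio.h> and <inttypes.h> are
+ * assumed.
+ *
+ *    static void handle_event(const struct pt_event *ev)
+ *    {
+ *        switch (ev->type) {
+ *        case ptev_enabled:
+ *            printf("tracing enabled at 0x%" PRIx64 "\n",
+ *                   ev->variant.enabled.ip);
+ *            break;
+ *
+ *        case ptev_async_branch:
+ *            printf("async branch from 0x%" PRIx64 "\n",
+ *                   ev->variant.async_branch.from);
+ *            break;
+ *
+ *        default:
+ *            break;
+ *        }
+ *    }
+ */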
+
+
+/** Allocate an Intel PT query decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_query_decoder *
+pt_qry_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT query decoder.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_qry_free_decoder(struct pt_query_decoder *decoder);
+
+/** Synchronize an Intel PT query decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not yet been synchronized, the search starts at the
+ * beginning of the trace buffer for forward synchronization and at the
+ * end of the trace buffer for backward synchronization.
+ *
+ * If \@ip is not NULL, it is set to the last IP.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_qry_sync_forward(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+extern pt_export int pt_qry_sync_backward(struct pt_query_decoder *decoder,
+ uint64_t *ip);
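+
+/* Example (editor's sketch): visit every synchronization point in a trace,
+ * assuming `qry' is an allocated query decoder.
+ *
+ *    uint64_t ip;
+ *    int status;
+ *
+ *    for (;;) {
+ *        status = pt_qry_sync_forward(qry, &ip);
+ *        if (status < 0)
+ *            break;        // -pte_eos: no further sync point
+ *
+ *        // decode from this synchronization point ...
+ *    }
+ */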
+
+/** Manually synchronize an Intel PT query decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * If \@ip is not NULL, it is set to the last IP.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_qry_sync_set(struct pt_query_decoder *decoder,
+ uint64_t *ip, uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_get_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * This is useful for splitting a trace stream for parallel decoding.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_qry_get_config(const struct pt_query_decoder *decoder);
+
+/** Query whether the next conditional branch has been taken.
+ *
+ * On success, provides 1 (taken) or 0 (not taken) in \@taken for the next
+ * conditional branch and updates \@decoder.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no conditional branch is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@taken is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_cond_branch(struct pt_query_decoder *decoder,
+ int *taken);
+
+/** Get the next indirect branch destination.
+ *
+ * On success, provides the linear destination address of the next indirect
+ * branch in \@ip and updates \@decoder.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no indirect branch is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@ip is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_indirect_branch(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+
+/** Query the next pending event.
+ *
+ * On success, provides the next event \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no event is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_event(struct pt_query_decoder *decoder,
+ struct pt_event *event, size_t size);
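+
+/* Example (editor's sketch): a fragment of the query decoding pattern.  The
+ * caller follows the code in its memory image and consults the decoder when
+ * the disassembled instruction needs trace; `qry' is an assumed decoder and
+ * error handling is abbreviated.
+ *
+ *    struct pt_event ev;
+ *    int status, taken;
+ *
+ *    status = pt_qry_cond_branch(qry, &taken);
+ *    if (status < 0)
+ *        return status;
+ *
+ *    // drain pending events before issuing further queries
+ *    while (status & pts_event_pending) {
+ *        status = pt_qry_event(qry, &ev, sizeof(ev));
+ *        if (status < 0)
+ *            return status;
+ *    }
+ */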
+
+/** Query the current time.
+ *
+ * On success, provides the time at the last query in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_qry_time(struct pt_query_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder,
+ uint32_t *cbr);
+
+
+
+/* Traced image. */
+
+
+
+/** An Intel PT address space identifier.
+ *
+ * This identifies a particular address space when adding file sections or
+ * when reading memory.
+ */
+struct pt_asid {
+ /** The size of this object - set to sizeof(struct pt_asid). */
+ size_t size;
+
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The VMCS Base address. */
+ uint64_t vmcs;
+};
+
+/** An unknown CR3 value to be used for pt_asid objects. */
+static const uint64_t pt_asid_no_cr3 = 0xffffffffffffffffull;
+
+/** An unknown VMCS Base value to be used for pt_asid objects. */
+static const uint64_t pt_asid_no_vmcs = 0xffffffffffffffffull;
+
+/** Initialize an address space identifier. */
+static inline void pt_asid_init(struct pt_asid *asid)
+{
+ asid->size = sizeof(*asid);
+ asid->cr3 = pt_asid_no_cr3;
+ asid->vmcs = pt_asid_no_vmcs;
+}
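+
+/* Example (editor's sketch): an address space identifier that matches on the
+ * CR3 value only; the VMCS base remains unknown.  The CR3 value is
+ * illustrative.
+ *
+ *    struct pt_asid asid;
+ *
+ *    pt_asid_init(&asid);
+ *    asid.cr3 = 0x12345000ull;
+ */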
+
+
+/** A cache of traced image sections. */
+struct pt_image_section_cache;
+
+/** Allocate a traced memory image section cache.
+ *
+ * An optional \@name may be given to the cache. The name string is copied.
+ *
+ * Returns a new traced memory image section cache on success, NULL otherwise.
+ */
+extern pt_export struct pt_image_section_cache *
+pt_iscache_alloc(const char *name);
+
+/** Free a traced memory image section cache.
+ *
+ * The \@iscache must have been allocated with pt_iscache_alloc().
+ * The \@iscache must not be used after a successful return.
+ */
+extern pt_export void pt_iscache_free(struct pt_image_section_cache *iscache);
+
+/** Set the image section cache limit.
+ *
+ * Set the limit for a section cache in bytes. A non-zero limit will keep the
+ * least recently used sections mapped until the limit is reached. A limit of
+ * zero disables caching.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_invalid if \@iscache is NULL.
+ */
+extern pt_export int
+pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit);
+
+/** Get the image section cache name.
+ *
+ * Returns a pointer to \@iscache's name or NULL if there is no name.
+ */
+extern pt_export const char *
+pt_iscache_name(const struct pt_image_section_cache *iscache);
+
+/** Add a new file section to the traced memory image section cache.
+ *
+ * Adds a new section consisting of \@size bytes starting at \@offset in
+ * \@filename loaded at the virtual address \@vaddr if \@iscache does not
+ * already contain such a section.
+ *
+ * Returns an image section identifier (isid) uniquely identifying that section
+ * in \@iscache.
+ *
+ * The section is silently truncated to match the size of \@filename.
+ *
+ * Returns a positive isid on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@iscache or \@filename is NULL.
+ * Returns -pte_invalid if \@offset is too big.
+ */
+extern pt_export int pt_iscache_add_file(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t vaddr);
+
+/** Read memory from a cached file section.
+ *
+ * Reads \@size bytes of memory starting at virtual address \@vaddr in the
+ * section identified by \@isid in \@iscache into \@buffer.
+ *
+ * The caller is responsible for allocating a \@buffer of at least \@size bytes.
+ *
+ * The read request may be truncated if it crosses section boundaries or
+ * if \@size is too big. Reads of at least 4 KiB in one chunk are
+ * supported unless the read would cross a section boundary.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@iscache or \@buffer is NULL.
+ * Returns -pte_invalid if \@size is zero.
+ * Returns -pte_nomap if \@vaddr is not contained in section \@isid.
+ * Returns -pte_bad_image if \@iscache does not contain \@isid.
+ */
+extern pt_export int pt_iscache_read(struct pt_image_section_cache *iscache,
+ uint8_t *buffer, uint64_t size, int isid,
+ uint64_t vaddr);
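+
+/* Example (editor's sketch): populate a section cache and read from it.  The
+ * file name, offsets, and load address are illustrative.
+ *
+ *    struct pt_image_section_cache *iscache;
+ *    uint8_t buffer[16];
+ *    int isid, status;
+ *
+ *    iscache = pt_iscache_alloc("example");
+ *    if (!iscache)
+ *        return -pte_nomem;
+ *
+ *    isid = pt_iscache_add_file(iscache, "/bin/ls", 0ull, 0x1000ull,
+ *                               0x400000ull);
+ *    if (isid < 0)
+ *        return isid;
+ *
+ *    status = pt_iscache_read(iscache, buffer, sizeof(buffer), isid,
+ *                             0x400000ull);
+ */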
+
+/** The traced memory image. */
+struct pt_image;
+
+
+/** Allocate a traced memory image.
+ *
+ * An optional \@name may be given to the image. The name string is copied.
+ *
+ * Returns a new traced memory image on success, NULL otherwise.
+ */
+extern pt_export struct pt_image *pt_image_alloc(const char *name);
+
+/** Free a traced memory image.
+ *
+ * The \@image must have been allocated with pt_image_alloc().
+ * The \@image must not be used after a successful return.
+ */
+extern pt_export void pt_image_free(struct pt_image *image);
+
+/** Get the image name.
+ *
+ * Returns a pointer to \@image's name or NULL if there is no name.
+ */
+extern pt_export const char *pt_image_name(const struct pt_image *image);
+
+/** Add a new file section to the traced memory image.
+ *
+ * Adds \@size bytes starting at \@offset in \@filename. The section is
+ * loaded at the virtual address \@vaddr in the address space \@asid.
+ *
+ * The \@asid may be NULL or (partially) invalid. In that case only the valid
+ * fields are considered when comparing with other address spaces. Use this
+ * when tracing a single process or when adding sections to all processes.
+ *
+ * The section is silently truncated to match the size of \@filename.
+ *
+ * Existing sections that would overlap with the new section will be shrunk
+ * or split.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@filename is NULL.
+ * Returns -pte_invalid if \@offset is too big.
+ */
+extern pt_export int pt_image_add_file(struct pt_image *image,
+ const char *filename, uint64_t offset,
+ uint64_t size,
+ const struct pt_asid *asid,
+ uint64_t vaddr);
+
+/** Add a section from an image section cache.
+ *
+ * Add the section from \@iscache identified by \@isid in address space \@asid.
+ *
+ * Existing sections that would overlap with the new section will be shrunk
+ * or split.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@image or \@iscache is NULL.
+ * Returns -pte_bad_image if \@iscache does not contain \@isid.
+ */
+extern pt_export int pt_image_add_cached(struct pt_image *image,
+ struct pt_image_section_cache *iscache,
+ int isid, const struct pt_asid *asid);
+
+/** Copy an image.
+ *
+ * Adds all sections from \@src to \@image. Sections that could not be added
+ * will be ignored.
+ *
+ * Returns the number of ignored sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@src is NULL.
+ */
+extern pt_export int pt_image_copy(struct pt_image *image,
+ const struct pt_image *src);
+
+/** Remove all sections loaded from a file.
+ *
+ * Removes all sections loaded from \@filename from the address space \@asid.
+ * Specify the same \@asid that was used for adding sections from \@filename.
+ *
+ * Returns the number of removed sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@filename is NULL.
+ */
+extern pt_export int pt_image_remove_by_filename(struct pt_image *image,
+ const char *filename,
+ const struct pt_asid *asid);
+
+/** Remove all sections loaded into an address space.
+ *
+ * Removes all sections loaded into \@asid. Specify the same \@asid that was
+ * used for adding sections.
+ *
+ * Returns the number of removed sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image is NULL.
+ */
+extern pt_export int pt_image_remove_by_asid(struct pt_image *image,
+ const struct pt_asid *asid);
+
+/** A read memory callback function.
+ *
+ * It shall read \@size bytes of memory from address space \@asid starting
+ * at \@ip into \@buffer.
+ *
+ * It shall return the number of bytes read on success.
+ * It shall return a negative pt_error_code otherwise.
+ */
+typedef int (read_memory_callback_t)(uint8_t *buffer, size_t size,
+ const struct pt_asid *asid,
+ uint64_t ip, void *context);
+
+/** Set the memory callback for the traced memory image.
+ *
+ * Sets \@callback for reading memory. The callback is used for addresses
+ * that are not found in file sections. The \@context argument is passed
+ * to \@callback on each use.
+ *
+ * There can only be one callback at any time. A subsequent call will replace
+ * the previous callback. If \@callback is NULL, the callback is removed.
+ *
+ * Returns -pte_invalid if \@image is NULL.
+ */
+extern pt_export int pt_image_set_callback(struct pt_image *image,
+ read_memory_callback_t *callback,
+ void *context);
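+
+/* Example (editor's sketch): a read-memory callback backed by a single flat
+ * buffer.  The struct and function names are illustrative.
+ *
+ *    struct flat_mem {
+ *        uint64_t base;
+ *        size_t len;
+ *        const uint8_t *bytes;
+ *    };
+ *
+ *    static int read_flat(uint8_t *buffer, size_t size,
+ *                         const struct pt_asid *asid, uint64_t ip,
+ *                         void *context)
+ *    {
+ *        const struct flat_mem *mem = context;
+ *
+ *        (void) asid;    // a single address space in this sketch
+ *
+ *        if (ip < mem->base || mem->base + mem->len <= ip)
+ *            return -pte_nomap;
+ *
+ *        if (mem->base + mem->len - ip < size)
+ *            size = (size_t) (mem->base + mem->len - ip);
+ *
+ *        memcpy(buffer, mem->bytes + (ip - mem->base), size);
+ *        return (int) size;
+ *    }
+ *
+ *    // ... status = pt_image_set_callback(image, read_flat, &mem);
+ */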
+
+
+
+/* Instruction flow decoder. */
+
+
+
+/** The instruction class.
+ *
+ * We provide only a very coarse classification suitable for reconstructing
+ * the execution flow.
+ */
+enum pt_insn_class {
+ /* The instruction could not be classified. */
+ ptic_error,
+
+ /* The instruction is something not listed below. */
+ ptic_other,
+
+ /* The instruction is a near (function) call. */
+ ptic_call,
+
+ /* The instruction is a near (function) return. */
+ ptic_return,
+
+ /* The instruction is a near unconditional jump. */
+ ptic_jump,
+
+ /* The instruction is a near conditional jump. */
+ ptic_cond_jump,
+
+ /* The instruction is a call-like far transfer.
+ * E.g. SYSCALL, SYSENTER, or FAR CALL.
+ */
+ ptic_far_call,
+
+ /* The instruction is a return-like far transfer.
+ * E.g. SYSRET, SYSEXIT, IRET, or FAR RET.
+ */
+ ptic_far_return,
+
+ /* The instruction is a jump-like far transfer.
+ * E.g. FAR JMP.
+ */
+ ptic_far_jump,
+
+ /* The instruction is a PTWRITE. */
+ ptic_ptwrite
+};
+
+/** The maximal size of an instruction. */
+enum {
+ pt_max_insn_size = 15
+};
+
+/** A single traced instruction. */
+struct pt_insn {
+ /** The virtual address in its process. */
+ uint64_t ip;
+
+ /** The image section identifier for the section containing this
+ * instruction.
+ *
+ * A value of zero means that the section did not have an identifier:
+ * either the section was not added via an image section cache, or the
+ * memory was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** A coarse classification. */
+ enum pt_insn_class iclass;
+
+ /** The raw bytes. */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size in bytes. */
+ uint8_t size;
+
+ /** A collection of flags giving additional information:
+ *
+ * - the instruction was executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - this instruction is truncated in its image section.
+ *
+ * It starts in the image section identified by \@isid and continues
+ * in one or more other sections.
+ */
+ uint32_t truncated:1;
+};
+
+
+/** Allocate an Intel PT instruction flow decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_insn_decoder *
+pt_insn_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT instruction flow decoder.
+ *
+ * This will destroy the decoder's default image.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_insn_free_decoder(struct pt_insn_decoder *decoder);
+
+/** Synchronize an Intel PT instruction flow decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not yet been synchronized, the search starts at the
+ * beginning of the trace buffer for forward synchronization and at the
+ * end of the trace buffer for backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_insn_sync_forward(struct pt_insn_decoder *decoder);
+extern pt_export int pt_insn_sync_backward(struct pt_insn_decoder *decoder);
+
+/** Manually synchronize an Intel PT instruction flow decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_insn_sync_set(struct pt_insn_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_insn_get_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the traced image.
+ *
+ * The returned image may be modified as long as no decoder that uses this
+ * image is running.
+ *
+ * Returns a pointer to the traced image the decoder uses for reading memory.
+ * Returns NULL if \@decoder is NULL.
+ */
+extern pt_export struct pt_image *
+pt_insn_get_image(struct pt_insn_decoder *decoder);
+
+/** Set the traced image.
+ *
+ * Sets the image that \@decoder uses for reading memory to \@image. If \@image
+ * is NULL, sets the image to \@decoder's default image.
+ *
+ * Only one image can be active at any time.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_insn_set_image(struct pt_insn_decoder *decoder,
+ struct pt_image *image);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_insn_get_config(const struct pt_insn_decoder *decoder);
+
+/** Return the current time.
+ *
+ * On success, provides the time at the last preceding timing packet in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_insn_time(struct pt_insn_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder,
+ uint32_t *cbr);
+
+/** Return the current address space identifier.
+ *
+ * On success, provides the current address space identifier in \@asid.
+ *
+ * The \@size argument must be set to sizeof(struct pt_asid). At most \@size
+ * bytes will be copied and \@asid->size will be set to the actual size of the
+ * provided address space identifier.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@asid is NULL.
+ */
+extern pt_export int pt_insn_asid(const struct pt_insn_decoder *decoder,
+ struct pt_asid *asid, size_t size);
+
+/** Determine the next instruction.
+ *
+ * On success, provides the next instruction in execution order in \@insn.
+ *
+ * The \@size argument must be set to sizeof(struct pt_insn).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns pts_eos to indicate the end of the trace stream. Subsequent calls
+ * to pt_insn_next() will continue to return pts_eos until trace is required
+ * to determine the next instruction.
+ *
+ * Returns -pte_bad_context if the decoder encountered an unexpected packet.
+ * Returns -pte_bad_opc if the decoder encountered unknown packets.
+ * Returns -pte_bad_packet if the decoder encountered unknown packet payloads.
+ * Returns -pte_bad_query if the decoder got out of sync.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@insn is NULL.
+ * Returns -pte_nomap if the memory at the instruction address can't be read.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_insn_next(struct pt_insn_decoder *decoder,
+ struct pt_insn *insn, size_t size);
+
+/** Get the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_query if there is no event.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ */
+extern pt_export int pt_insn_event(struct pt_insn_decoder *decoder,
+ struct pt_event *event, size_t size);
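+
+/* Example (editor's sketch): a minimal instruction flow decoding loop.  It
+ * assumes `decoder' has been allocated and given an image; pending events
+ * are drained whenever pts_event_pending is set.  <stdio.h> and
+ * <inttypes.h> are assumed.
+ *
+ *    struct pt_insn insn;
+ *    struct pt_event ev;
+ *    int status;
+ *
+ *    status = pt_insn_sync_forward(decoder);
+ *    while (status >= 0) {
+ *        while (status & pts_event_pending) {
+ *            status = pt_insn_event(decoder, &ev, sizeof(ev));
+ *            if (status < 0)
+ *                break;
+ *        }
+ *
+ *        if (status < 0)
+ *            break;
+ *
+ *        status = pt_insn_next(decoder, &insn, sizeof(insn));
+ *        if (status >= 0)
+ *            printf("0x%" PRIx64 "\n", insn.ip);
+ *    }
+ */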
+
+
+
+/* Block decoder. */
+
+
+
+/** A block of instructions.
+ *
+ * Instructions in this block are executed sequentially but are not necessarily
+ * contiguous in memory. Users are expected to follow direct branches.
+ */
+struct pt_block {
+ /** The IP of the first instruction in this block. */
+ uint64_t ip;
+
+ /** The IP of the last instruction in this block.
+ *
+ * This can be used for error-detection.
+ */
+ uint64_t end_ip;
+
+ /** The image section that contains the instructions in this block.
+ *
+ * A value of zero means that the section did not have an identifier:
+ * either the section was not added via an image section cache, or the
+ * memory was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode for all instructions in this block. */
+ enum pt_exec_mode mode;
+
+ /** The instruction class for the last instruction in this block.
+ *
+ * This field may be set to ptic_error to indicate that the instruction
+ * class is not available. The block decoder may choose to not provide
+ * the instruction class in some cases for performance reasons.
+ */
+ enum pt_insn_class iclass;
+
+ /** The number of instructions in this block. */
+ uint16_t ninsn;
+
+ /** The raw bytes of the last instruction in this block in case the
+ * instruction does not fit entirely into this block's section.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size of the last instruction in this block in bytes.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t size;
+
+ /** A collection of flags giving additional information about the
+ * instructions in this block.
+ *
+ * - all instructions in this block were executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - the last instruction in this block is truncated.
+ *
+ * It starts in this block's section but continues in one or more
+ * other sections depending on how fragmented the memory image is.
+ *
+ * The raw bytes for the last instruction are provided in \@raw and
+ * its size in \@size in this case.
+ */
+ uint32_t truncated:1;
+};
+
+/** Allocate an Intel PT block decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_block_decoder *
+pt_blk_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT block decoder.
+ *
+ * This will destroy the decoder's default image.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_blk_free_decoder(struct pt_block_decoder *decoder);
+
+/** Synchronize an Intel PT block decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not yet been synchronized, the search starts at the
+ * beginning of the trace buffer for forward synchronization and at the
+ * end of the trace buffer for backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_blk_sync_forward(struct pt_block_decoder *decoder);
+extern pt_export int pt_blk_sync_backward(struct pt_block_decoder *decoder);
+
+/** Manually synchronize an Intel PT block decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_blk_sync_set(struct pt_block_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_blk_get_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_blk_get_sync_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the traced image.
+ *
+ * The returned image may be modified as long as \@decoder is not running.
+ *
+ * Returns a pointer to the traced image \@decoder uses for reading memory.
+ * Returns NULL if \@decoder is NULL.
+ */
+extern pt_export struct pt_image *
+pt_blk_get_image(struct pt_block_decoder *decoder);
+
+/** Set the traced image.
+ *
+ * Sets the image that \@decoder uses for reading memory to \@image. If \@image
+ * is NULL, sets the image to \@decoder's default image.
+ *
+ * Only one image can be active at any time.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_blk_set_image(struct pt_block_decoder *decoder,
+ struct pt_image *image);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_blk_get_config(const struct pt_block_decoder *decoder);
+
+/** Return the current time.
+ *
+ * On success, provides the time at the last preceding timing packet in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_blk_time(struct pt_block_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder,
+ uint32_t *cbr);
+
+/** Return the current address space identifier.
+ *
+ * On success, provides the current address space identifier in \@asid.
+ *
+ * The \@size argument must be set to sizeof(struct pt_asid). At most \@size
+ * bytes will be copied and \@asid->size will be set to the actual size of the
+ * provided address space identifier.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@asid is NULL.
+ */
+extern pt_export int pt_blk_asid(const struct pt_block_decoder *decoder,
+ struct pt_asid *asid, size_t size);
+
+/** Determine the next block of instructions.
+ *
+ * On success, provides the next block of instructions in execution order in
+ * \@block.
+ *
+ * The \@size argument must be set to sizeof(struct pt_block).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns pts_eos to indicate the end of the trace stream. Subsequent calls
+ * to pt_blk_next() will continue to return pts_eos until trace is required
+ * to determine the next instruction.
+ *
+ * Returns -pte_bad_context if the decoder encountered an unexpected packet.
+ * Returns -pte_bad_opc if the decoder encountered unknown packets.
+ * Returns -pte_bad_packet if the decoder encountered unknown packet payloads.
+ * Returns -pte_bad_query if the decoder got out of sync.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@block is NULL.
+ * Returns -pte_nomap if the memory at the instruction address can't be read.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_blk_next(struct pt_block_decoder *decoder,
+ struct pt_block *block, size_t size);
+
+/** Get the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_query if there is no event.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ */
+extern pt_export int pt_blk_event(struct pt_block_decoder *decoder,
+ struct pt_event *event, size_t size);
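+
+/* Example (editor's sketch): count instructions with the block decoder,
+ * assuming `decoder' has been allocated and given an image.
+ *
+ *    struct pt_block block;
+ *    struct pt_event ev;
+ *    uint64_t ninsn = 0;
+ *    int status;
+ *
+ *    status = pt_blk_sync_forward(decoder);
+ *    while (status >= 0) {
+ *        while (status & pts_event_pending) {
+ *            status = pt_blk_event(decoder, &ev, sizeof(ev));
+ *            if (status < 0)
+ *                break;
+ *        }
+ *
+ *        if (status < 0)
+ *            break;
+ *
+ *        status = pt_blk_next(decoder, &block, sizeof(block));
+ *        if (status >= 0)
+ *            ninsn += block.ninsn;
+ *    }
+ */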
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* INTEL_PT_H */
diff --git a/contrib/processor-trace/libipt/include/intel-pt.h.in b/contrib/processor-trace/libipt/include/intel-pt.h.in
new file mode 100755
index 0000000000000..de1c6275c8063
--- /dev/null
+++ b/contrib/processor-trace/libipt/include/intel-pt.h.in
@@ -0,0 +1,2463 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef INTEL_PT_H
+#define INTEL_PT_H
+
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Intel(R) Processor Trace (Intel PT) decoder library.
+ *
+ * This file is logically structured into the following sections:
+ *
+ * - Version
+ * - Errors
+ * - Configuration
+ * - Packet encoder / decoder
+ * - Query decoder
+ * - Traced image
+ * - Instruction flow decoder
+ * - Block decoder
+ */
+
+
+
+struct pt_encoder;
+struct pt_packet_decoder;
+struct pt_query_decoder;
+struct pt_insn_decoder;
+struct pt_block_decoder;
+
+
+
+/* A macro to mark functions as exported. */
+#ifndef pt_export
+# if defined(__GNUC__)
+# define pt_export __attribute__((visibility("default")))
+# elif defined(_MSC_VER)
+# define pt_export __declspec(dllimport)
+# else
+# error "unknown compiler"
+# endif
+#endif
+
+
+
+/* Version. */
+
+
+/** The header version. */
+#define LIBIPT_VERSION_MAJOR ${PT_VERSION_MAJOR}
+#define LIBIPT_VERSION_MINOR ${PT_VERSION_MINOR}
+
+#define LIBIPT_VERSION ((LIBIPT_VERSION_MAJOR << 8) + LIBIPT_VERSION_MINOR)
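+
+/* Example (editor's sketch): user code can require a minimum header version
+ * at compile time; the version numbers below are illustrative.
+ *
+ *    #if LIBIPT_VERSION < ((2 << 8) + 0)
+ *    #  error "libipt 2.0 or later is required"
+ *    #endif
+ */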
+
+
+/** The library version. */
+struct pt_version {
+ /** Major version number. */
+ uint8_t major;
+
+ /** Minor version number. */
+ uint8_t minor;
+
+ /** Reserved bits. */
+ uint16_t reserved;
+
+ /** Build number. */
+ uint32_t build;
+
+ /** Version extension. */
+ const char *ext;
+};
+
+
+/** Return the library version. */
+extern pt_export struct pt_version pt_library_version(void);
+
+
+
+/* Errors. */
+
+
+
+/** Error codes. */
+enum pt_error_code {
+ /* No error. Everything is OK. */
+ pte_ok,
+
+ /* Internal decoder error. */
+ pte_internal,
+
+ /* Invalid argument. */
+ pte_invalid,
+
+ /* Decoder out of sync. */
+ pte_nosync,
+
+ /* Unknown opcode. */
+ pte_bad_opc,
+
+ /* Unknown payload. */
+ pte_bad_packet,
+
+ /* Unexpected packet context. */
+ pte_bad_context,
+
+ /* Decoder reached end of trace stream. */
+ pte_eos,
+
+ /* No packet matching the query to be found. */
+ pte_bad_query,
+
+ /* Decoder out of memory. */
+ pte_nomem,
+
+ /* Bad configuration. */
+ pte_bad_config,
+
+ /* There is no IP. */
+ pte_noip,
+
+ /* The IP has been suppressed. */
+ pte_ip_suppressed,
+
+ /* There is no memory mapped at the requested address. */
+ pte_nomap,
+
+ /* An instruction could not be decoded. */
+ pte_bad_insn,
+
+ /* No wall-clock time is available. */
+ pte_no_time,
+
+ /* No core:bus ratio available. */
+ pte_no_cbr,
+
+ /* Bad traced image. */
+ pte_bad_image,
+
+ /* A locking error. */
+ pte_bad_lock,
+
+ /* The requested feature is not supported. */
+ pte_not_supported,
+
+ /* The return address stack is empty. */
+ pte_retstack_empty,
+
+ /* A compressed return is not indicated correctly by a taken branch. */
+ pte_bad_retcomp,
+
+ /* The current decoder state does not match the state in the trace. */
+ pte_bad_status_update,
+
+ /* The trace did not contain an expected enabled event. */
+ pte_no_enable,
+
+ /* An event was ignored. */
+ pte_event_ignored,
+
+ /* Something overflowed. */
+ pte_overflow,
+
+ /* A file handling error. */
+ pte_bad_file,
+
+ /* Unknown cpu. */
+ pte_bad_cpu
+};
+
+
+/** Decode a function return value into a pt_error_code. */
+static inline enum pt_error_code pt_errcode(int status)
+{
+ return (status >= 0) ? pte_ok : (enum pt_error_code) -status;
+}
+
+/** Return a human-readable error string. */
+extern pt_export const char *pt_errstr(enum pt_error_code);
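+
+/* Example (editor's sketch): turn a negative library return value into a
+ * readable diagnostic; `status' is assumed to be the return value of some
+ * libipt call and <stdio.h> is assumed.
+ *
+ *    if (status < 0)
+ *        fprintf(stderr, "decode error: %s\n",
+ *                pt_errstr(pt_errcode(status)));
+ */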
+
+
+
+/* Configuration. */
+
+
+
+/** A cpu vendor. */
+enum pt_cpu_vendor {
+ pcv_unknown,
+ pcv_intel
+};
+
+/** A cpu identifier. */
+struct pt_cpu {
+ /** The cpu vendor. */
+ enum pt_cpu_vendor vendor;
+
+ /** The cpu family. */
+ uint16_t family;
+
+ /** The cpu model. */
+ uint8_t model;
+
+ /** The stepping. */
+ uint8_t stepping;
+};
+
+/** A collection of Intel PT errata. */
+struct pt_errata {
+ /** BDM70: Intel(R) Processor Trace PSB+ Packets May Contain
+ * Unexpected Packets.
+ *
+ * Same as: SKD024, SKL021, KBL021.
+ *
+ * Some Intel Processor Trace packets should be issued only between
+ * TIP.PGE and TIP.PGD packets. Due to this erratum, when a TIP.PGE
+ * packet is generated it may be preceded by a PSB+ that incorrectly
+ * includes FUP and MODE.Exec packets.
+ */
+ uint32_t bdm70:1;
+
+ /** BDM64: An Incorrect LBR or Intel(R) Processor Trace Packet May Be
+ * Recorded Following a Transactional Abort.
+ *
+ * Use of Intel(R) Transactional Synchronization Extensions (Intel(R)
+ * TSX) may result in a transactional abort. If an abort occurs
+ * immediately following a branch instruction, an incorrect branch
+ * target may be logged in an LBR (Last Branch Record) or in an Intel(R)
+ * Processor Trace (Intel(R) PT) packet before the LBR or Intel PT
+ * packet produced by the abort.
+ */
+ uint32_t bdm64:1;
+
+ /** SKD007: Intel(R) PT Buffer Overflow May Result in Incorrect Packets.
+ *
+ * Same as: SKL049, KBL041.
+ *
+ * Under complex micro-architectural conditions, an Intel PT (Processor
+ * Trace) OVF (Overflow) packet may be issued after the first byte of a
+ * multi-byte CYC (Cycle Count) packet, instead of any remaining bytes
+ * of the CYC.
+ */
+ uint32_t skd007:1;
+
+ /** SKD022: VM Entry That Clears TraceEn May Generate a FUP.
+ *
+ * Same as: SKL024, KBL023.
+ *
+ * If VM entry clears Intel(R) PT (Intel Processor Trace)
+ * IA32_RTIT_CTL.TraceEn (MSR 570H, bit 0) while PacketEn is 1 then a
+ * FUP (Flow Update Packet) will precede the TIP.PGD (Target IP Packet,
+ * Packet Generation Disable). VM entry can clear TraceEn if the
+ * VM-entry MSR-load area includes an entry for the IA32_RTIT_CTL MSR.
+ */
+ uint32_t skd022:1;
+
+ /** SKD010: Intel(R) PT FUP May be Dropped After OVF.
+ *
+ * Same as: SKD014, SKL033, KBL030.
+ *
+ * Some Intel PT (Intel Processor Trace) OVF (Overflow) packets may not
+ * be followed by a FUP (Flow Update Packet) or TIP.PGE (Target IP
+ * Packet, Packet Generation Enable).
+ */
+ uint32_t skd010:1;
+
+ /** SKL014: Intel(R) PT TIP.PGD May Not Have Target IP Payload.
+ *
+ * Same as: KBL014.
+ *
+ * When Intel PT (Intel Processor Trace) is enabled and a direct
+ * unconditional branch clears IA32_RTIT_STATUS.FilterEn (MSR 571H, bit
+ * 0), due to this erratum, the resulting TIP.PGD (Target IP Packet,
+ * Packet Generation Disable) may not have an IP payload with the target
+ * IP.
+ */
+ uint32_t skl014:1;
+
+ /** APL12: Intel(R) PT OVF May Be Followed By An Unexpected FUP Packet.
+ *
+ * Certain Intel PT (Processor Trace) packets, including FUPs (Flow
+ * Update Packets), should be issued only between TIP.PGE (Target IP
+ * Packet - Packet Generation Enable) and TIP.PGD (Target IP Packet -
+ * Packet Generation Disable) packets. When outside a TIP.PGE/TIP.PGD
+ * pair, as a result of IA32_RTIT_STATUS.FilterEn[0] (MSR 571H) being
+ * cleared, an OVF (Overflow) packet may be unexpectedly followed by a
+ * FUP.
+ */
+ uint32_t apl12:1;
+
+ /** APL11: Intel(R) PT OVF Packet May Be Followed by TIP.PGD Packet.
+ *
+ * If Intel PT (Processor Trace) encounters an internal buffer overflow
+ * and generates an OVF (Overflow) packet just as IA32_RTIT_CTL (MSR
+ * 570H) bit 0 (TraceEn) is cleared, or during a far transfer that
+ * causes IA32_RTIT_STATUS.ContextEn[1] (MSR 571H) to be cleared, the
+ * OVF may be followed by a TIP.PGD (Target Instruction Pointer - Packet
+ * Generation Disable) packet.
+ */
+ uint32_t apl11:1;
+
+ /* Reserve a few bytes for the future. */
+ uint32_t reserved[15];
+};
+
+/** A collection of decoder-specific configuration flags. */
+struct pt_conf_flags {
+ /** The decoder variant. */
+ union {
+ /** Flags for the block decoder. */
+ struct {
+ /** End a block after a call instruction. */
+ uint32_t end_on_call:1;
+
+ /** Enable tick events for timing updates. */
+ uint32_t enable_tick_events:1;
+
+ /** End a block after a jump instruction. */
+ uint32_t end_on_jump:1;
+ } block;
+
+ /** Flags for the instruction flow decoder. */
+ struct {
+ /** Enable tick events for timing updates. */
+ uint32_t enable_tick_events:1;
+ } insn;
+
+ /* Reserve a few bytes for future extensions. */
+ uint32_t reserved[4];
+ } variant;
+};
+
+/** The address filter configuration. */
+struct pt_conf_addr_filter {
+ /** The address filter configuration.
+ *
+ * This corresponds to the respective fields in IA32_RTIT_CTL MSR.
+ */
+ union {
+ uint64_t addr_cfg;
+
+ struct {
+ uint32_t addr0_cfg:4;
+ uint32_t addr1_cfg:4;
+ uint32_t addr2_cfg:4;
+ uint32_t addr3_cfg:4;
+ } ctl;
+ } config;
+
+ /** The address ranges configuration.
+ *
+ * This corresponds to the IA32_RTIT_ADDRn_A/B MSRs.
+ */
+ uint64_t addr0_a;
+ uint64_t addr0_b;
+ uint64_t addr1_a;
+ uint64_t addr1_b;
+ uint64_t addr2_a;
+ uint64_t addr2_b;
+ uint64_t addr3_a;
+ uint64_t addr3_b;
+
+ /* Reserve some space. */
+ uint64_t reserved[8];
+};
+
+/** An unknown packet. */
+struct pt_packet_unknown;
+
+/** An Intel PT decoder configuration. */
+struct pt_config {
+ /** The size of the config structure in bytes. */
+ size_t size;
+
+ /** The trace buffer begin address. */
+ uint8_t *begin;
+
+ /** The trace buffer end address. */
+ uint8_t *end;
+
+ /** An optional callback for handling unknown packets.
+ *
+ * If \@callback is not NULL, it is called for any unknown opcode.
+ */
+ struct {
+ /** The callback function.
+ *
+ * It shall decode the packet at \@pos into \@unknown.
+ * It shall return the number of bytes read upon success.
+ * It shall return a negative pt_error_code otherwise.
+ * The \@context field declared below is passed as \@context.
+ */
+ int (*callback)(struct pt_packet_unknown *unknown,
+ const struct pt_config *config,
+ const uint8_t *pos, void *context);
+
+ /** The user-defined context for this configuration. */
+ void *context;
+ } decode;
+
+ /** The cpu on which Intel PT has been recorded. */
+ struct pt_cpu cpu;
+
+ /** The errata to apply when encoding or decoding Intel PT. */
+ struct pt_errata errata;
+
+ /** The CTC frequency.
+ *
+ * This is only required if MTC packets have been enabled in
+ * IA32_RTIT_CTL.MTCEn.
+ */
+ uint32_t cpuid_0x15_eax, cpuid_0x15_ebx;
+
+ /** The MTC frequency as defined in IA32_RTIT_CTL.MTCFreq.
+ *
+ * This is only required if MTC packets have been enabled in
+ * IA32_RTIT_CTL.MTCEn.
+ */
+ uint8_t mtc_freq;
+
+ /** The nominal frequency as defined in MSR_PLATFORM_INFO[15:8].
+ *
+ * This is only required if CYC packets have been enabled in
+ * IA32_RTIT_CTL.CYCEn.
+ *
+ * If zero, timing calibration will only be able to use MTC and CYC
+ * packets.
+ *
+ * If not zero, timing calibration will also be able to use CBR
+ * packets.
+ */
+ uint8_t nom_freq;
+
+ /** A collection of decoder-specific flags. */
+ struct pt_conf_flags flags;
+
+ /** The address filter configuration. */
+ struct pt_conf_addr_filter addr_filter;
+};
+
+
+/** Zero-initialize an Intel PT configuration. */
+static inline void pt_config_init(struct pt_config *config)
+{
+ memset(config, 0, sizeof(*config));
+
+ config->size = sizeof(*config);
+}
+
+/** Determine errata for a given cpu.
+ *
+ * Updates \@errata based on \@cpu.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@errata or \@cpu is NULL.
+ * Returns -pte_bad_cpu if \@cpu is not known.
+ */
+extern pt_export int pt_cpu_errata(struct pt_errata *errata,
+ const struct pt_cpu *cpu);
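+
+/* Example (editor's sketch): fill in a configuration for a recorded trace.
+ * The buffer pointers and cpu identification are illustrative; the model
+ * value 0x4e would select a Skylake mobile processor.
+ *
+ *    struct pt_config config;
+ *    int status;
+ *
+ *    pt_config_init(&config);
+ *    config.begin = trace_begin;    // assumed trace buffer bounds
+ *    config.end = trace_end;
+ *
+ *    config.cpu.vendor = pcv_intel;
+ *    config.cpu.family = 6;
+ *    config.cpu.model = 0x4e;
+ *    config.cpu.stepping = 3;
+ *
+ *    status = pt_cpu_errata(&config.errata, &config.cpu);
+ *    if (status < 0)
+ *        return status;
+ */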
+
+
+
+/* Packet encoder / decoder. */
+
+
+
+/** Intel PT packet types. */
+enum pt_packet_type {
+ /* An invalid packet. */
+ ppt_invalid,
+
+ /* A packet decodable by the optional decoder callback. */
+ ppt_unknown,
+
+ /* Actual packets supported by this library. */
+ ppt_pad,
+ ppt_psb,
+ ppt_psbend,
+ ppt_fup,
+ ppt_tip,
+ ppt_tip_pge,
+ ppt_tip_pgd,
+ ppt_tnt_8,
+ ppt_tnt_64,
+ ppt_mode,
+ ppt_pip,
+ ppt_vmcs,
+ ppt_cbr,
+ ppt_tsc,
+ ppt_tma,
+ ppt_mtc,
+ ppt_cyc,
+ ppt_stop,
+ ppt_ovf,
+ ppt_mnt,
+ ppt_exstop,
+ ppt_mwait,
+ ppt_pwre,
+ ppt_pwrx,
+ ppt_ptw
+};
+
+/** The IP compression. */
+enum pt_ip_compression {
+ /* The bits encode the payload size and the encoding scheme.
+ *
+ * No payload. The IP has been suppressed.
+ */
+ pt_ipc_suppressed = 0x0,
+
+ /* Payload: 16 bits. Update last IP. */
+ pt_ipc_update_16 = 0x01,
+
+ /* Payload: 32 bits. Update last IP. */
+ pt_ipc_update_32 = 0x02,
+
+ /* Payload: 48 bits. Sign extend to full address. */
+ pt_ipc_sext_48 = 0x03,
+
+ /* Payload: 48 bits. Update last IP. */
+ pt_ipc_update_48 = 0x04,
+
+ /* Payload: 64 bits. Full address. */
+ pt_ipc_full = 0x06
+};
+
+/** An execution mode. */
+enum pt_exec_mode {
+ ptem_unknown,
+ ptem_16bit,
+ ptem_32bit,
+ ptem_64bit
+};
+
+/** Mode packet leaves. */
+enum pt_mode_leaf {
+ pt_mol_exec = 0x00,
+ pt_mol_tsx = 0x20
+};
+
+/** A TNT-8 or TNT-64 packet. */
+struct pt_packet_tnt {
+ /** TNT payload bit size. */
+ uint8_t bit_size;
+
+ /** TNT payload excluding stop bit. */
+ uint64_t payload;
+};
+
+/** A packet with IP payload. */
+struct pt_packet_ip {
+ /** IP compression. */
+ enum pt_ip_compression ipc;
+
+ /** Zero-extended payload ip. */
+ uint64_t ip;
+};
+
+/** A mode.exec packet. */
+struct pt_packet_mode_exec {
+ /** The mode.exec csl bit. */
+ uint32_t csl:1;
+
+ /** The mode.exec csd bit. */
+ uint32_t csd:1;
+};
+
+static inline enum pt_exec_mode
+pt_get_exec_mode(const struct pt_packet_mode_exec *packet)
+{
+ if (packet->csl)
+ return packet->csd ? ptem_unknown : ptem_64bit;
+ else
+ return packet->csd ? ptem_32bit : ptem_16bit;
+}
+
+static inline struct pt_packet_mode_exec
+pt_set_exec_mode(enum pt_exec_mode mode)
+{
+ struct pt_packet_mode_exec packet;
+
+ switch (mode) {
+ default:
+ packet.csl = 1;
+ packet.csd = 1;
+ break;
+
+ case ptem_64bit:
+ packet.csl = 1;
+ packet.csd = 0;
+ break;
+
+ case ptem_32bit:
+ packet.csl = 0;
+ packet.csd = 1;
+ break;
+
+ case ptem_16bit:
+ packet.csl = 0;
+ packet.csd = 0;
+ break;
+ }
+
+ return packet;
+}
+
+/** A mode.tsx packet. */
+struct pt_packet_mode_tsx {
+ /** The mode.tsx intx bit. */
+ uint32_t intx:1;
+
+ /** The mode.tsx abrt bit. */
+ uint32_t abrt:1;
+};
+
+/** A mode packet. */
+struct pt_packet_mode {
+ /** Mode leaf. */
+ enum pt_mode_leaf leaf;
+
+ /** Mode bits. */
+ union {
+ /** Packet: mode.exec. */
+ struct pt_packet_mode_exec exec;
+
+ /** Packet: mode.tsx. */
+ struct pt_packet_mode_tsx tsx;
+ } bits;
+};
+
+/** A PIP packet. */
+struct pt_packet_pip {
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The non-root bit. */
+ uint32_t nr:1;
+};
+
+/** A TSC packet. */
+struct pt_packet_tsc {
+ /** The TSC value. */
+ uint64_t tsc;
+};
+
+/** A CBR packet. */
+struct pt_packet_cbr {
+ /** The core/bus cycle ratio. */
+ uint8_t ratio;
+};
+
+/** A TMA packet. */
+struct pt_packet_tma {
+ /** The crystal clock tick counter value. */
+ uint16_t ctc;
+
+ /** The fast counter value. */
+ uint16_t fc;
+};
+
+/** An MTC packet. */
+struct pt_packet_mtc {
+ /** The crystal clock tick counter value. */
+ uint8_t ctc;
+};
+
+/** A CYC packet. */
+struct pt_packet_cyc {
+ /** The cycle counter value. */
+ uint64_t value;
+};
+
+/** A VMCS packet. */
+struct pt_packet_vmcs {
+ /** The VMCS base address (i.e. the shifted payload). */
+ uint64_t base;
+};
+
+/** An MNT packet. */
+struct pt_packet_mnt {
+ /** The raw payload. */
+ uint64_t payload;
+};
+
+/** An EXSTOP packet. */
+struct pt_packet_exstop {
+ /** A flag specifying the binding of the packet:
+ *
+ * set: binds to the next FUP.
+ * clear: standalone.
+ */
+ uint32_t ip:1;
+};
+
+/** An MWAIT packet. */
+struct pt_packet_mwait {
+ /** The MWAIT hints (EAX). */
+ uint32_t hints;
+
+ /** The MWAIT extensions (ECX). */
+ uint32_t ext;
+};
+
+/** A PWRE packet. */
+struct pt_packet_pwre {
+ /** The resolved thread C-state. */
+ uint8_t state;
+
+ /** The resolved thread sub C-state. */
+ uint8_t sub_state;
+
+ /** A flag indicating whether the C-state entry was initiated by h/w. */
+ uint32_t hw:1;
+};
+
+/** A PWRX packet. */
+struct pt_packet_pwrx {
+ /** The core C-state at the time of the wake. */
+ uint8_t last;
+
+ /** The deepest core C-state achieved during sleep. */
+ uint8_t deepest;
+
+ /** The wake reason:
+ *
+ * - due to external interrupt received.
+ */
+ uint32_t interrupt:1;
+
+ /** - due to store to monitored address. */
+ uint32_t store:1;
+
+ /** - due to h/w autonomous condition such as HDC. */
+ uint32_t autonomous:1;
+};
+
+/** A PTW packet. */
+struct pt_packet_ptw {
+ /** The raw payload. */
+ uint64_t payload;
+
+ /** The payload size as encoded in the packet. */
+ uint8_t plc;
+
+ /** A flag saying whether a FUP is following PTW that provides
+ * the IP of the corresponding PTWRITE instruction.
+ */
+ uint32_t ip:1;
+};
+
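+/** Return the PTW payload size in bytes for the given \@plc.
+ *
+ * Returns a negative pt_error_code if \@plc is not supported.
+ */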
+static inline int pt_ptw_size(uint8_t plc)
+{
+ switch (plc) {
+ case 0:
+ return 4;
+
+ case 1:
+ return 8;
+
+ case 2:
+ case 3:
+ return -pte_bad_packet;
+ }
+
+ return -pte_internal;
+}
+
+/** An unknown packet decodable by the optional decoder callback. */
+struct pt_packet_unknown {
+ /** Pointer to the raw packet bytes. */
+ const uint8_t *packet;
+
+ /** Optional pointer to a user-defined structure. */
+ void *priv;
+};
+
+/** An Intel PT packet. */
+struct pt_packet {
+ /** The type of the packet.
+ *
+ * This also determines the \@payload field.
+ */
+ enum pt_packet_type type;
+
+ /** The size of the packet including opcode and payload. */
+ uint8_t size;
+
+ /** Packet specific data. */
+ union {
+ /** Packets: pad, ovf, psb, psbend, stop - no payload. */
+
+ /** Packet: tnt-8, tnt-64. */
+ struct pt_packet_tnt tnt;
+
+ /** Packet: tip, fup, tip.pge, tip.pgd. */
+ struct pt_packet_ip ip;
+
+ /** Packet: mode. */
+ struct pt_packet_mode mode;
+
+ /** Packet: pip. */
+ struct pt_packet_pip pip;
+
+ /** Packet: tsc. */
+ struct pt_packet_tsc tsc;
+
+ /** Packet: cbr. */
+ struct pt_packet_cbr cbr;
+
+ /** Packet: tma. */
+ struct pt_packet_tma tma;
+
+ /** Packet: mtc. */
+ struct pt_packet_mtc mtc;
+
+ /** Packet: cyc. */
+ struct pt_packet_cyc cyc;
+
+ /** Packet: vmcs. */
+ struct pt_packet_vmcs vmcs;
+
+ /** Packet: mnt. */
+ struct pt_packet_mnt mnt;
+
+ /** Packet: exstop. */
+ struct pt_packet_exstop exstop;
+
+ /** Packet: mwait. */
+ struct pt_packet_mwait mwait;
+
+ /** Packet: pwre. */
+ struct pt_packet_pwre pwre;
+
+ /** Packet: pwrx. */
+ struct pt_packet_pwrx pwrx;
+
+ /** Packet: ptw. */
+ struct pt_packet_ptw ptw;
+
+ /** Packet: unknown. */
+ struct pt_packet_unknown unknown;
+ } payload;
+};
+
+
+
+/* Packet encoder. */
+
+
+
+/** Allocate an Intel PT packet encoder.
+ *
+ * The encoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the encoder.
+ *
+ * The encoder starts at the beginning of the trace buffer.
+ */
+extern pt_export struct pt_encoder *
+pt_alloc_encoder(const struct pt_config *config);
+
+/** Free an Intel PT packet encoder.
+ *
+ * The \@encoder must not be used after a successful return.
+ */
+extern pt_export void pt_free_encoder(struct pt_encoder *encoder);
+
+/** Hard-set the synchronization point of an Intel PT packet encoder.
+ *
+ * Synchronize \@encoder to \@offset within the trace buffer.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if the given offset is beyond the end of the trace buffer.
+ * Returns -pte_invalid if \@encoder is NULL.
+ */
+extern pt_export int pt_enc_sync_set(struct pt_encoder *encoder,
+ uint64_t offset);
+
+/** Get the current packet encoder position.
+ *
+ * Fills the current \@encoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@encoder or \@offset is NULL.
+ */
+extern pt_export int pt_enc_get_offset(const struct pt_encoder *encoder,
+ uint64_t *offset);
+
+/** Return a pointer to \@encoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@encoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_enc_get_config(const struct pt_encoder *encoder);
+
+/** Encode an Intel PT packet.
+ *
+ * Writes \@packet at \@encoder's current position in the Intel PT buffer and
+ * advances the \@encoder beyond the written packet.
+ *
+ * The \@packet.size field is ignored.
+ *
+ * In case of errors, the \@encoder is not advanced and nothing is written
+ * into the Intel PT buffer.
+ *
+ * Returns the number of bytes written on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_bad_opc if \@packet.type is not known.
+ * Returns -pte_bad_packet if \@packet's payload is invalid.
+ * Returns -pte_eos if \@encoder reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@encoder or \@packet is NULL.
+ */
+extern pt_export int pt_enc_next(struct pt_encoder *encoder,
+ const struct pt_packet *packet);
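+
+/* Example: encoding a packet (an illustrative sketch, not part of the API).
+ *
+ * The address is hypothetical and handle_error() is a placeholder; \@config
+ * is assumed to be set up as described above.
+ *
+ *   struct pt_encoder *encoder;
+ *   struct pt_packet packet;
+ *   int bytes;
+ *
+ *   encoder = pt_alloc_encoder(&config);
+ *   if (!encoder)
+ *     handle_error(-pte_nomem);
+ *
+ *   packet.type = ppt_tip;
+ *   packet.payload.ip.ipc = pt_ipc_full;
+ *   packet.payload.ip.ip = 0x1000ull;
+ *
+ *   bytes = pt_enc_next(encoder, &packet);   // returns bytes written
+ *   if (bytes < 0)
+ *     handle_error(bytes);
+ *
+ *   pt_free_encoder(encoder);
+ */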
+
+
+
+/* Packet decoder. */
+
+
+
+/** Allocate an Intel PT packet decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_packet_decoder *
+pt_pkt_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT packet decoder.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_pkt_free_decoder(struct pt_packet_decoder *decoder);
+
+/** Synchronize an Intel PT packet decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_pkt_sync_forward(struct pt_packet_decoder *decoder);
+extern pt_export int pt_pkt_sync_backward(struct pt_packet_decoder *decoder);
+
+/** Hard-set the synchronization point of an Intel PT decoder.
+ *
+ * Synchronize \@decoder to \@offset within the trace buffer.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if the given offset is beyond the end of the trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_pkt_sync_set(struct pt_packet_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_pkt_get_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * This is useful when splitting a trace stream for parallel decoding.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_pkt_get_sync_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_pkt_get_config(const struct pt_packet_decoder *decoder);
+
+/** Decode the next packet and advance the decoder.
+ *
+ * Decodes the packet at \@decoder's current position into \@packet and
+ * adjusts the \@decoder's position by the number of bytes the packet
+ * consumed.
+ *
+ * The \@size argument must be set to sizeof(struct pt_packet).
+ *
+ * Returns the number of bytes consumed on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_bad_opc if the packet is unknown.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@decoder reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@packet is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_pkt_next(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet, size_t size);
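+
+/* Example: a typical packet decode loop (an illustrative sketch).
+ *
+ * process_packet() is a hypothetical consumer of decoded packets and
+ * handle_error() a placeholder.
+ *
+ *   struct pt_packet_decoder *decoder;
+ *   struct pt_packet packet;
+ *   int errcode;
+ *
+ *   decoder = pt_pkt_alloc_decoder(&config);
+ *   if (!decoder)
+ *     handle_error(-pte_nomem);
+ *
+ *   errcode = pt_pkt_sync_forward(decoder);  // find the first PSB
+ *   while (errcode >= 0) {
+ *     errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ *     if (errcode < 0)
+ *       break;                                // e.g. -pte_eos at the end
+ *     process_packet(&packet);
+ *   }
+ *
+ *   pt_pkt_free_decoder(decoder);
+ */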
+
+
+
+/* Query decoder. */
+
+
+
+/** Decoder status flags. */
+enum pt_status_flag {
+ /** There is an event pending. */
+ pts_event_pending = 1 << 0,
+
+ /** The address has been suppressed. */
+ pts_ip_suppressed = 1 << 1,
+
+ /** There is no more trace data available. */
+ pts_eos = 1 << 2
+};
+
+/** Event types. */
+enum pt_event_type {
+ /* Tracing has been enabled/disabled. */
+ ptev_enabled,
+ ptev_disabled,
+
+ /* Tracing has been disabled asynchronously. */
+ ptev_async_disabled,
+
+ /* An asynchronous branch, e.g. interrupt. */
+ ptev_async_branch,
+
+ /* A synchronous paging event. */
+ ptev_paging,
+
+ /* An asynchronous paging event. */
+ ptev_async_paging,
+
+ /* Trace overflow. */
+ ptev_overflow,
+
+ /* An execution mode change. */
+ ptev_exec_mode,
+
+ /* A transactional execution state change. */
+ ptev_tsx,
+
+ /* Trace Stop. */
+ ptev_stop,
+
+ /* A synchronous vmcs event. */
+ ptev_vmcs,
+
+ /* An asynchronous vmcs event. */
+ ptev_async_vmcs,
+
+ /* Execution has stopped. */
+ ptev_exstop,
+
+ /* An MWAIT operation completed. */
+ ptev_mwait,
+
+ /* A power state was entered. */
+ ptev_pwre,
+
+ /* A power state was exited. */
+ ptev_pwrx,
+
+ /* A PTWRITE event. */
+ ptev_ptwrite,
+
+ /* A timing event. */
+ ptev_tick,
+
+ /* A core:bus ratio event. */
+ ptev_cbr,
+
+ /* A maintenance event. */
+ ptev_mnt
+};
+
+/** An event. */
+struct pt_event {
+ /** The type of the event. */
+ enum pt_event_type type;
+
+ /** A flag indicating that the event IP has been suppressed. */
+ uint32_t ip_suppressed:1;
+
+ /** A flag indicating that the event is for status update. */
+ uint32_t status_update:1;
+
+ /** A flag indicating that the event has timing information. */
+ uint32_t has_tsc:1;
+
+ /** The time stamp count of the event.
+ *
+ * This field is only valid if \@has_tsc is set.
+ */
+ uint64_t tsc;
+
+ /** The number of lost mtc and cyc packets.
+ *
+ * This gives an idea about the quality of the \@tsc. The more packets
+ * were dropped, the less precise the timing is.
+ */
+ uint32_t lost_mtc;
+ uint32_t lost_cyc;
+
+ /* Reserved space for future extensions. */
+ uint64_t reserved[2];
+
+ /** Event specific data. */
+ union {
+ /** Event: enabled. */
+ struct {
+ /** The address at which tracing resumes. */
+ uint64_t ip;
+
+ /** A flag indicating that tracing resumes from the IP
+ * at which tracing had been disabled before.
+ */
+ uint32_t resumed:1;
+ } enabled;
+
+ /** Event: disabled. */
+ struct {
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /* The exact source ip needs to be determined using
+ * disassembly and the filter configuration.
+ */
+ } disabled;
+
+ /** Event: async disabled. */
+ struct {
+ /** The source address of the asynchronous branch that
+ * disabled tracing.
+ */
+ uint64_t at;
+
+ /** The destination of the first branch inside a
+ * filtered area.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } async_disabled;
+
+ /** Event: async branch. */
+ struct {
+ /** The branch source address. */
+ uint64_t from;
+
+ /** The branch destination address.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t to;
+ } async_branch;
+
+ /** Event: paging. */
+ struct {
+ /** The updated CR3 value.
+ *
+ * The lower 5 bits have been zeroed out.
+ * The upper bits have been zeroed out depending on the
+ * maximum possible address.
+ */
+ uint64_t cr3;
+
+ /** A flag indicating whether the cpu is operating in
+ * vmx non-root (guest) mode.
+ */
+ uint32_t non_root:1;
+
+ /* The address at which the event is effective is
+ * obvious from the disassembly.
+ */
+ } paging;
+
+ /** Event: async paging. */
+ struct {
+ /** The updated CR3 value.
+ *
+ * The lower 5 bits have been zeroed out.
+ * The upper bits have been zeroed out depending on the
+ * maximum possible address.
+ */
+ uint64_t cr3;
+
+ /** A flag indicating whether the cpu is operating in
+ * vmx non-root (guest) mode.
+ */
+ uint32_t non_root:1;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+ } async_paging;
+
+ /** Event: overflow. */
+ struct {
+ /** The address at which tracing resumes after overflow.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ * In this case, the overflow resolved while tracing
+ * was disabled.
+ */
+ uint64_t ip;
+ } overflow;
+
+ /** Event: exec mode. */
+ struct {
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+ } exec_mode;
+
+ /** Event: tsx. */
+ struct {
+ /** The address at which the event is effective.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /** A flag indicating speculative execution mode. */
+ uint32_t speculative:1;
+
+ /** A flag indicating speculative execution aborts. */
+ uint32_t aborted:1;
+ } tsx;
+
+ /** Event: vmcs. */
+ struct {
+ /** The VMCS base address.
+ *
+ * The address is zero-extended with the lower 12 bits
+ * all zero.
+ */
+ uint64_t base;
+
+ /* The new VMCS base address should be stored and
+ * applied on subsequent VM entries.
+ */
+ } vmcs;
+
+ /** Event: async vmcs. */
+ struct {
+ /** The VMCS base address.
+ *
+ * The address is zero-extended with the lower 12 bits
+ * all zero.
+ */
+ uint64_t base;
+
+ /** The address at which the event is effective. */
+ uint64_t ip;
+
+ /* An async paging event that binds to the same IP
+ * will always succeed this async vmcs event.
+ */
+ } async_vmcs;
+
+ /** Event: execution stopped. */
+ struct {
+ /** The address at which execution has stopped. This is
+ * the last instruction that did not complete.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } exstop;
+
+ /** Event: mwait. */
+ struct {
+ /** The address of the instruction causing the mwait.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+
+ /** The mwait hints (eax).
+ *
+ * Reserved bits are undefined.
+ */
+ uint32_t hints;
+
+ /** The mwait extensions (ecx).
+ *
+ * Reserved bits are undefined.
+ */
+ uint32_t ext;
+ } mwait;
+
+ /** Event: power state entry. */
+ struct {
+ /** The resolved thread C-state. */
+ uint8_t state;
+
+ /** The resolved thread sub C-state. */
+ uint8_t sub_state;
+
+ /** A flag indicating whether the C-state entry was
+ * initiated by h/w.
+ */
+ uint32_t hw:1;
+ } pwre;
+
+ /** Event: power state exit. */
+ struct {
+ /** The core C-state at the time of the wake. */
+ uint8_t last;
+
+ /** The deepest core C-state achieved during sleep. */
+ uint8_t deepest;
+
+ /** The wake reason:
+ *
+ * - due to external interrupt received.
+ */
+ uint32_t interrupt:1;
+
+ /** - due to store to monitored address. */
+ uint32_t store:1;
+
+ /** - due to h/w autonomous condition such as HDC. */
+ uint32_t autonomous:1;
+ } pwrx;
+
+ /** Event: ptwrite. */
+ struct {
+ /** The address of the ptwrite instruction.
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ *
+ * In this case, the address is obvious from the
+ * disassembly.
+ */
+ uint64_t ip;
+
+ /** The size of the below \@payload in bytes. */
+ uint8_t size;
+
+ /** The ptwrite payload. */
+ uint64_t payload;
+ } ptwrite;
+
+ /** Event: tick. */
+ struct {
+ /** The instruction address near which the tick occurred.
+ *
+ * A timestamp can sometimes be attributed directly to
+ * an instruction (e.g. to an indirect branch that
+ * receives CYC + TIP) and sometimes not (e.g. MTC).
+ *
+ * This field is not valid if \@ip_suppressed is set.
+ */
+ uint64_t ip;
+ } tick;
+
+ /** Event: cbr. */
+ struct {
+ /** The core:bus ratio. */
+ uint16_t ratio;
+ } cbr;
+
+ /** Event: mnt. */
+ struct {
+ /** The raw payload. */
+ uint64_t payload;
+ } mnt;
+ } variant;
+};
+
+
+/** Allocate an Intel PT query decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_query_decoder *
+pt_qry_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT query decoder.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_qry_free_decoder(struct pt_query_decoder *decoder);
+
+/** Synchronize an Intel PT query decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * If \@ip is not NULL, set it to the last ip.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_qry_sync_forward(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+extern pt_export int pt_qry_sync_backward(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+
+/** Manually synchronize an Intel PT query decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * If \@ip is not NULL, set it to the last ip.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_qry_sync_set(struct pt_query_decoder *decoder,
+ uint64_t *ip, uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_get_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * This is useful for splitting a trace stream for parallel decoding.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_qry_get_config(const struct pt_query_decoder *decoder);
+
+/** Query whether the next conditional branch has been taken.
+ *
+ * On success, provides 1 (taken) or 0 (not taken) in \@taken for the next
+ * conditional branch and updates \@decoder.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no conditional branch is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@taken is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_cond_branch(struct pt_query_decoder *decoder,
+ int *taken);
+
+/** Get the next indirect branch destination.
+ *
+ * On success, provides the linear destination address of the next indirect
+ * branch in \@ip and updates \@decoder.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no indirect branch is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@ip is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_indirect_branch(struct pt_query_decoder *decoder,
+ uint64_t *ip);
+
+/** Query the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_bad_query if no event is found.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_qry_event(struct pt_query_decoder *decoder,
+ struct pt_event *event, size_t size);
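+
+/* Example: a query decoding sketch (illustrative).
+ *
+ * Which query to ask next is determined by disassembling the traced code,
+ * which is left out here.
+ *
+ *   struct pt_query_decoder *decoder;
+ *   uint64_t ip;
+ *   int status, taken;
+ *
+ *   decoder = pt_qry_alloc_decoder(&config);
+ *   status = pt_qry_sync_forward(decoder, &ip);
+ *
+ *   // drain pending events before querying for branches
+ *   while (status >= 0 && (status & pts_event_pending)) {
+ *     struct pt_event event;
+ *
+ *     status = pt_qry_event(decoder, &event, sizeof(event));
+ *   }
+ *
+ *   // at a conditional branch in the disassembled code:
+ *   status = pt_qry_cond_branch(decoder, &taken);
+ *
+ *   // at an indirect branch in the disassembled code:
+ *   status = pt_qry_indirect_branch(decoder, &ip);
+ *
+ *   pt_qry_free_decoder(decoder);
+ */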
+
+/** Query the current time.
+ *
+ * On success, provides the time at the last query in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_qry_time(struct pt_query_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder,
+ uint32_t *cbr);
+
+
+
+/* Traced image. */
+
+
+
+/** An Intel PT address space identifier.
+ *
+ * This identifies a particular address space when adding file sections or
+ * when reading memory.
+ */
+struct pt_asid {
+ /** The size of this object - set to sizeof(struct pt_asid). */
+ size_t size;
+
+ /** The CR3 value. */
+ uint64_t cr3;
+
+ /** The VMCS Base address. */
+ uint64_t vmcs;
+};
+
+/** An unknown CR3 value to be used for pt_asid objects. */
+static const uint64_t pt_asid_no_cr3 = 0xffffffffffffffffull;
+
+/** An unknown VMCS Base value to be used for pt_asid objects. */
+static const uint64_t pt_asid_no_vmcs = 0xffffffffffffffffull;
+
+/** Initialize an address space identifier. */
+static inline void pt_asid_init(struct pt_asid *asid)
+{
+ asid->size = sizeof(*asid);
+ asid->cr3 = pt_asid_no_cr3;
+ asid->vmcs = pt_asid_no_vmcs;
+}
+
+
+/** A cache of traced image sections. */
+struct pt_image_section_cache;
+
+/** Allocate a traced memory image section cache.
+ *
+ * An optional \@name may be given to the cache. The name string is copied.
+ *
+ * Returns a new traced memory image section cache on success, NULL otherwise.
+ */
+extern pt_export struct pt_image_section_cache *
+pt_iscache_alloc(const char *name);
+
+/** Free a traced memory image section cache.
+ *
+ * The \@iscache must have been allocated with pt_iscache_alloc().
+ * The \@iscache must not be used after a successful return.
+ */
+extern pt_export void pt_iscache_free(struct pt_image_section_cache *iscache);
+
+/** Set the image section cache limit.
+ *
+ * Set the limit for a section cache in bytes. A non-zero limit will keep the
+ * least recently used sections mapped until the limit is reached. A limit of
+ * zero disables caching.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_invalid if \@iscache is NULL.
+ */
+extern pt_export int
+pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit);
+
+/** Get the image section cache name.
+ *
+ * Returns a pointer to \@iscache's name or NULL if there is no name.
+ */
+extern pt_export const char *
+pt_iscache_name(const struct pt_image_section_cache *iscache);
+
+/** Add a new file section to the traced memory image section cache.
+ *
+ * Adds a new section consisting of \@size bytes starting at \@offset in
+ * \@filename loaded at the virtual address \@vaddr if \@iscache does not
+ * already contain such a section.
+ *
+ * Returns an image section identifier (isid) uniquely identifying that section
+ * in \@iscache.
+ *
+ * The section is silently truncated to match the size of \@filename.
+ *
+ * Returns a positive isid on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@iscache or \@filename is NULL.
+ * Returns -pte_invalid if \@offset is too big.
+ */
+extern pt_export int pt_iscache_add_file(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t vaddr);
+
+/** Read memory from a cached file section
+ *
+ * Reads \@size bytes of memory starting at virtual address \@vaddr in the
+ * section identified by \@isid in \@iscache into \@buffer.
+ *
+ * The caller is responsible for allocating a \@buffer of at least \@size bytes.
+ *
+ * The read request may be truncated if it crosses section boundaries or if
+ * \@size is too big. We support reading at least 4Kbyte in one chunk
+ * unless the read would cross a section boundary.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@iscache or \@buffer is NULL.
+ * Returns -pte_invalid if \@size is zero.
+ * Returns -pte_nomap if \@vaddr is not contained in section \@isid.
+ * Returns -pte_bad_image if \@iscache does not contain \@isid.
+ */
+extern pt_export int pt_iscache_read(struct pt_image_section_cache *iscache,
+ uint8_t *buffer, uint64_t size, int isid,
+ uint64_t vaddr);
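+
+/* Example: caching and reading a file section (illustrative; the file name
+ * and addresses are assumptions, and handle_error() is a placeholder).
+ *
+ *   struct pt_image_section_cache *iscache;
+ *   uint8_t buffer[16];
+ *   int isid, bytes;
+ *
+ *   iscache = pt_iscache_alloc("sections");
+ *   if (!iscache)
+ *     handle_error(-pte_nomem);
+ *
+ *   // cache 0x2000 bytes of a.out, loaded at virtual address 0x400000
+ *   isid = pt_iscache_add_file(iscache, "a.out", 0ull, 0x2000ull,
+ *                              0x400000ull);
+ *   if (isid < 0)
+ *     handle_error(isid);
+ *
+ *   bytes = pt_iscache_read(iscache, buffer, sizeof(buffer), isid,
+ *                           0x400100ull);
+ */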
+
+/** The traced memory image. */
+struct pt_image;
+
+
+/** Allocate a traced memory image.
+ *
+ * An optional \@name may be given to the image. The name string is copied.
+ *
+ * Returns a new traced memory image on success, NULL otherwise.
+ */
+extern pt_export struct pt_image *pt_image_alloc(const char *name);
+
+/** Free a traced memory image.
+ *
+ * The \@image must have been allocated with pt_image_alloc().
+ * The \@image must not be used after a successful return.
+ */
+extern pt_export void pt_image_free(struct pt_image *image);
+
+/** Get the image name.
+ *
+ * Returns a pointer to \@image's name or NULL if there is no name.
+ */
+extern pt_export const char *pt_image_name(const struct pt_image *image);
+
+/** Add a new file section to the traced memory image.
+ *
+ * Adds \@size bytes starting at \@offset in \@filename. The section is
+ * loaded at the virtual address \@vaddr in the address space \@asid.
+ *
+ * The \@asid may be NULL or (partially) invalid. In that case only the valid
+ * fields are considered when comparing with other address spaces. Use this
+ * when tracing a single process or when adding sections to all processes.
+ *
+ * The section is silently truncated to match the size of \@filename.
+ *
+ * Existing sections that would overlap with the new section will be shrunk
+ * or split.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@filename is NULL.
+ * Returns -pte_invalid if \@offset is too big.
+ */
+extern pt_export int pt_image_add_file(struct pt_image *image,
+ const char *filename, uint64_t offset,
+ uint64_t size,
+ const struct pt_asid *asid,
+ uint64_t vaddr);
+
+/** Add a section from an image section cache.
+ *
+ * Add the section from \@iscache identified by \@isid in address space \@asid.
+ *
+ * Existing sections that would overlap with the new section will be shrunk
+ * or split.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if \@image or \@iscache is NULL.
+ * Returns -pte_bad_image if \@iscache does not contain \@isid.
+ */
+extern pt_export int pt_image_add_cached(struct pt_image *image,
+ struct pt_image_section_cache *iscache,
+ int isid, const struct pt_asid *asid);
+
+/** Copy an image.
+ *
+ * Adds all sections from \@src to \@image. Sections that could not be added
+ * will be ignored.
+ *
+ * Returns the number of ignored sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@src is NULL.
+ */
+extern pt_export int pt_image_copy(struct pt_image *image,
+ const struct pt_image *src);
+
+/** Remove all sections loaded from a file.
+ *
+ * Removes all sections loaded from \@filename from the address space \@asid.
+ * Specify the same \@asid that was used for adding sections from \@filename.
+ *
+ * Returns the number of removed sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image or \@filename is NULL.
+ */
+extern pt_export int pt_image_remove_by_filename(struct pt_image *image,
+ const char *filename,
+ const struct pt_asid *asid);
+
+/** Remove all sections loaded into an address space.
+ *
+ * Removes all sections loaded into \@asid. Specify the same \@asid that was
+ * used for adding sections.
+ *
+ * Returns the number of removed sections on success, a negative error code
+ * otherwise.
+ *
+ * Returns -pte_invalid if \@image is NULL.
+ */
+extern pt_export int pt_image_remove_by_asid(struct pt_image *image,
+ const struct pt_asid *asid);
+
+/** A read memory callback function.
+ *
+ * It shall read \@size bytes of memory from address space \@asid starting
+ * at \@ip into \@buffer.
+ *
+ * It shall return the number of bytes read on success.
+ * It shall return a negative pt_error_code otherwise.
+ */
+typedef int (read_memory_callback_t)(uint8_t *buffer, size_t size,
+ const struct pt_asid *asid,
+ uint64_t ip, void *context);
+
+/** Set the memory callback for the traced memory image.
+ *
+ * Sets \@callback for reading memory. The callback is used for addresses
+ * that are not found in file sections. The \@context argument is passed
+ * to \@callback on each use.
+ *
+ * There can only be one callback at any time. A subsequent call will replace
+ * the previous callback. If \@callback is NULL, the callback is removed.
+ *
+ * Returns -pte_invalid if \@image is NULL.
+ */
+extern pt_export int pt_image_set_callback(struct pt_image *image,
+ read_memory_callback_t *callback,
+ void *context);
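+
+/* Example: populating a traced memory image (illustrative; \@iscache and
+ * \@isid are assumed to come from the section cache example above, and
+ * read_mem() is a hypothetical callback with the read_memory_callback_t
+ * signature).
+ *
+ *   struct pt_image *image;
+ *   struct pt_asid asid;
+ *   int errcode;
+ *
+ *   pt_asid_init(&asid);
+ *   asid.cr3 = 0x12345000ull;   // hypothetical address space
+ *
+ *   image = pt_image_alloc("traced-process");
+ *   errcode = pt_image_add_cached(image, iscache, isid, &asid);
+ *   if (errcode < 0)
+ *     handle_error(errcode);
+ *
+ *   // fall back to a callback for memory not covered by any section
+ *   errcode = pt_image_set_callback(image, read_mem, NULL);
+ */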
+
+
+
+/* Instruction flow decoder. */
+
+
+
+/** The instruction class.
+ *
+ * We provide only a very coarse classification suitable for reconstructing
+ * the execution flow.
+ */
+enum pt_insn_class {
+ /* The instruction could not be classified. */
+ ptic_error,
+
+ /* The instruction is something not listed below. */
+ ptic_other,
+
+ /* The instruction is a near (function) call. */
+ ptic_call,
+
+ /* The instruction is a near (function) return. */
+ ptic_return,
+
+ /* The instruction is a near unconditional jump. */
+ ptic_jump,
+
+ /* The instruction is a near conditional jump. */
+ ptic_cond_jump,
+
+ /* The instruction is a call-like far transfer.
+ * E.g. SYSCALL, SYSENTER, or FAR CALL.
+ */
+ ptic_far_call,
+
+ /* The instruction is a return-like far transfer.
+ * E.g. SYSRET, SYSEXIT, IRET, or FAR RET.
+ */
+ ptic_far_return,
+
+ /* The instruction is a jump-like far transfer.
+ * E.g. FAR JMP.
+ */
+ ptic_far_jump,
+
+ /* The instruction is a PTWRITE. */
+ ptic_ptwrite
+};
+
+/** The maximal size of an instruction. */
+enum {
+ pt_max_insn_size = 15
+};
+
+/** A single traced instruction. */
+struct pt_insn {
+ /** The virtual address in its process. */
+ uint64_t ip;
+
+ /** The image section identifier for the section containing this
+ * instruction.
+ *
+ * A value of zero means that the section did not have an identifier:
+ * the section was not added via an image section cache, or the memory
+ * was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode. */
+ enum pt_exec_mode mode;
+
+ /** A coarse classification. */
+ enum pt_insn_class iclass;
+
+ /** The raw bytes. */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size in bytes. */
+ uint8_t size;
+
+ /** A collection of flags giving additional information:
+ *
+ * - the instruction was executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - this instruction is truncated in its image section.
+ *
+ * It starts in the image section identified by \@isid and continues
+ * in one or more other sections.
+ */
+ uint32_t truncated:1;
+};
+
+
+/** Allocate an Intel PT instruction flow decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_insn_decoder *
+pt_insn_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT instruction flow decoder.
+ *
+ * This will destroy the decoder's default image.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_insn_free_decoder(struct pt_insn_decoder *decoder);
+
+/** Synchronize an Intel PT instruction flow decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_insn_sync_forward(struct pt_insn_decoder *decoder);
+extern pt_export int pt_insn_sync_backward(struct pt_insn_decoder *decoder);
+
+/** Manually synchronize an Intel PT instruction flow decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_insn_sync_set(struct pt_insn_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_insn_get_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the traced image.
+ *
+ * The returned image may be modified as long as no decoder that uses this
+ * image is running.
+ *
+ * Returns a pointer to the traced image the decoder uses for reading memory.
+ * Returns NULL if \@decoder is NULL.
+ */
+extern pt_export struct pt_image *
+pt_insn_get_image(struct pt_insn_decoder *decoder);
+
+/** Set the traced image.
+ *
+ * Sets the image that \@decoder uses for reading memory to \@image. If \@image
+ * is NULL, sets the image to \@decoder's default image.
+ *
+ * Only one image can be active at any time.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Return -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_insn_set_image(struct pt_insn_decoder *decoder,
+ struct pt_image *image);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_insn_get_config(const struct pt_insn_decoder *decoder);
+
+/** Return the current time.
+ *
+ * On success, provides the time at the last preceding timing packet in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_insn_time(struct pt_insn_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder,
+ uint32_t *cbr);
+
+/** Return the current address space identifier.
+ *
+ * On success, provides the current address space identifier in \@asid.
+ *
+ * The \@size argument must be set to sizeof(struct pt_asid). At most \@size
+ * bytes will be copied and \@asid->size will be set to the actual size of the
+ * provided address space identifier.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@asid is NULL.
+ */
+extern pt_export int pt_insn_asid(const struct pt_insn_decoder *decoder,
+ struct pt_asid *asid, size_t size);
+
+/** Determine the next instruction.
+ *
+ * On success, provides the next instruction in execution order in \@insn.
+ *
+ * The \@size argument must be set to sizeof(struct pt_insn).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns pts_eos to indicate the end of the trace stream. Subsequent calls
+ * to pt_insn_next() will continue to return pts_eos until trace is required
+ * to determine the next instruction.
+ *
+ * Returns -pte_bad_context if the decoder encountered an unexpected packet.
+ * Returns -pte_bad_opc if the decoder encountered unknown packets.
+ * Returns -pte_bad_packet if the decoder encountered unknown packet payloads.
+ * Returns -pte_bad_query if the decoder got out of sync.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@insn is NULL.
+ * Returns -pte_nomap if the memory at the instruction address can't be read.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_insn_next(struct pt_insn_decoder *decoder,
+ struct pt_insn *insn, size_t size);
+
+/** Get the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_query if there is no event.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ */
+extern pt_export int pt_insn_event(struct pt_insn_decoder *decoder,
+ struct pt_event *event, size_t size);
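+
+/* Example: a typical instruction flow decode loop (illustrative sketch).
+ *
+ * process_insn() is a hypothetical consumer; \@image is assumed to have been
+ * populated as shown above.
+ *
+ *   struct pt_insn_decoder *decoder;
+ *   struct pt_insn insn;
+ *   int status;
+ *
+ *   decoder = pt_insn_alloc_decoder(&config);
+ *   (void) pt_insn_set_image(decoder, image);
+ *
+ *   status = pt_insn_sync_forward(decoder);
+ *   while (status >= 0) {
+ *     // drain pending events before decoding further instructions
+ *     while (status & pts_event_pending) {
+ *       struct pt_event event;
+ *
+ *       status = pt_insn_event(decoder, &event, sizeof(event));
+ *       if (status < 0)
+ *         break;
+ *     }
+ *     if (status < 0)
+ *       break;                 // or resynchronize and continue
+ *
+ *     status = pt_insn_next(decoder, &insn, sizeof(insn));
+ *     if (status < 0)
+ *       break;
+ *     process_insn(&insn);
+ *   }
+ *
+ *   pt_insn_free_decoder(decoder);
+ */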
+
+
+
+/* Block decoder. */
+
+
+
+/** A block of instructions.
+ *
+ * Instructions in this block are executed sequentially but are not necessarily
+ * contiguous in memory. Users are expected to follow direct branches.
+ */
+struct pt_block {
+ /** The IP of the first instruction in this block. */
+ uint64_t ip;
+
+ /** The IP of the last instruction in this block.
+ *
+ * This can be used for error-detection.
+ */
+ uint64_t end_ip;
+
+ /** The image section that contains the instructions in this block.
+ *
+ * A value of zero means that the section did not have an identifier:
+ * the section was not added via an image section cache, or the memory
+ * was read via the read memory callback.
+ */
+ int isid;
+
+ /** The execution mode for all instructions in this block. */
+ enum pt_exec_mode mode;
+
+ /** The instruction class for the last instruction in this block.
+ *
+ * This field may be set to ptic_error to indicate that the instruction
+ * class is not available. The block decoder may choose not to provide
+ * the instruction class in some cases for performance reasons.
+ */
+ enum pt_insn_class iclass;
+
+ /** The number of instructions in this block. */
+ uint16_t ninsn;
+
+ /** The raw bytes of the last instruction in this block in case the
+ * instruction does not fit entirely into this block's section.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t raw[pt_max_insn_size];
+
+ /** The size of the last instruction in this block in bytes.
+ *
+ * This field is only valid if \@truncated is set.
+ */
+ uint8_t size;
+
+ /** A collection of flags giving additional information about the
+ * instructions in this block.
+ *
+ * - all instructions in this block were executed speculatively.
+ */
+ uint32_t speculative:1;
+
+ /** - the last instruction in this block is truncated.
+ *
+ * It starts in this block's section but continues in one or more
+ * other sections depending on how fragmented the memory image is.
+ *
+ * The raw bytes for the last instruction are provided in \@raw and
+ * its size in \@size in this case.
+ */
+ uint32_t truncated:1;
+};
+
+/** Allocate an Intel PT block decoder.
+ *
+ * The decoder will work on the buffer defined in \@config; it shall contain
+ * raw trace data and remain valid for the lifetime of the decoder.
+ *
+ * The decoder needs to be synchronized before it can be used.
+ */
+extern pt_export struct pt_block_decoder *
+pt_blk_alloc_decoder(const struct pt_config *config);
+
+/** Free an Intel PT block decoder.
+ *
+ * This will destroy the decoder's default image.
+ *
+ * The \@decoder must not be used after a successful return.
+ */
+extern pt_export void pt_blk_free_decoder(struct pt_block_decoder *decoder);
+
+/** Synchronize an Intel PT block decoder.
+ *
+ * Search for the next synchronization point in forward or backward direction.
+ *
+ * If \@decoder has not been synchronized yet, the search is started at the
+ * beginning of the trace buffer in case of forward synchronization and at the
+ * end of the trace buffer in case of backward synchronization.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if no further synchronization point is found.
+ * Returns -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_blk_sync_forward(struct pt_block_decoder *decoder);
+extern pt_export int pt_blk_sync_backward(struct pt_block_decoder *decoder);
+
+/** Manually synchronize an Intel PT block decoder.
+ *
+ * Synchronize \@decoder on the syncpoint at \@offset. There must be a PSB
+ * packet at \@offset.
+ *
+ * Returns zero or a positive value on success, a negative error code otherwise.
+ *
+ * Returns -pte_bad_opc if an unknown packet is encountered.
+ * Returns -pte_bad_packet if an unknown packet payload is encountered.
+ * Returns -pte_eos if \@offset lies outside of \@decoder's trace buffer.
+ * Returns -pte_eos if \@decoder reaches the end of its trace buffer.
+ * Returns -pte_invalid if \@decoder is NULL.
+ * Returns -pte_nosync if there is no syncpoint at \@offset.
+ */
+extern pt_export int pt_blk_sync_set(struct pt_block_decoder *decoder,
+ uint64_t offset);
+
+/** Get the current decoder position.
+ *
+ * Fills the current \@decoder position into \@offset.
+ *
+ * This is useful for reporting errors.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_blk_get_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the position of the last synchronization point.
+ *
+ * Fills the last synchronization position into \@offset.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@offset is NULL.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int
+pt_blk_get_sync_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset);
+
+/** Get the traced image.
+ *
+ * The returned image may be modified as long as \@decoder is not running.
+ *
+ * Returns a pointer to the traced image \@decoder uses for reading memory.
+ * Returns NULL if \@decoder is NULL.
+ */
+extern pt_export struct pt_image *
+pt_blk_get_image(struct pt_block_decoder *decoder);
+
+/** Set the traced image.
+ *
+ * Sets the image that \@decoder uses for reading memory to \@image. If \@image
+ * is NULL, sets the image to \@decoder's default image.
+ *
+ * Only one image can be active at any time.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Return -pte_invalid if \@decoder is NULL.
+ */
+extern pt_export int pt_blk_set_image(struct pt_block_decoder *decoder,
+ struct pt_image *image);
+
+/** Return a pointer to \@decoder's configuration.
+ *
+ * Returns a non-null pointer on success, NULL if \@decoder is NULL.
+ */
+extern pt_export const struct pt_config *
+pt_blk_get_config(const struct pt_block_decoder *decoder);
+
+/** Return the current time.
+ *
+ * On success, provides the time at the last preceding timing packet in \@time.
+ *
+ * The time is similar to what a rdtsc instruction would return. Depending
+ * on the configuration, the time may not be fully accurate. If TSC is not
+ * enabled, the time is relative to the last synchronization and can't be used
+ * to correlate with other TSC-based time sources. In this case, -pte_no_time
+ * is returned and the relative time is provided in \@time.
+ *
+ * Some timing-related packets may need to be dropped (mostly due to missing
+ * calibration or incomplete configuration). To get an idea about the quality
+ * of the estimated time, we record the number of dropped MTC and CYC packets.
+ *
+ * If \@lost_mtc is not NULL, set it to the number of lost MTC packets.
+ * If \@lost_cyc is not NULL, set it to the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern pt_export int pt_blk_time(struct pt_block_decoder *decoder,
+ uint64_t *time, uint32_t *lost_mtc,
+ uint32_t *lost_cyc);
+
+/** Return the current core bus ratio.
+ *
+ * On success, provides the current core:bus ratio in \@cbr. The ratio is
+ * defined as core cycles per bus clock cycle.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@cbr is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern pt_export int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder,
+ uint32_t *cbr);
+
+/** Return the current address space identifier.
+ *
+ * On success, provides the current address space identifier in \@asid.
+ *
+ * The \@size argument must be set to sizeof(struct pt_asid). At most \@size
+ * bytes will be copied and \@asid->size will be set to the actual size of the
+ * provided address space identifier.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_invalid if \@decoder or \@asid is NULL.
+ */
+extern pt_export int pt_blk_asid(const struct pt_block_decoder *decoder,
+ struct pt_asid *asid, size_t size);
+
+/** Determine the next block of instructions.
+ *
+ * On success, provides the next block of instructions in execution order in
+ * \@block.
+ *
+ * The \@size argument must be set to sizeof(struct pt_block).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns pts_eos to indicate the end of the trace stream. Subsequent calls
+ * to pt_blk_next() will continue to return pts_eos until trace is required
+ * to determine the next instruction.
+ *
+ * Returns -pte_bad_context if the decoder encountered an unexpected packet.
+ * Returns -pte_bad_opc if the decoder encountered unknown packets.
+ * Returns -pte_bad_packet if the decoder encountered unknown packet payloads.
+ * Returns -pte_bad_query if the decoder got out of sync.
+ * Returns -pte_eos if decoding reached the end of the Intel PT buffer.
+ * Returns -pte_invalid if \@decoder or \@block is NULL.
+ * Returns -pte_nomap if the memory at the instruction address can't be read.
+ * Returns -pte_nosync if \@decoder is out of sync.
+ */
+extern pt_export int pt_blk_next(struct pt_block_decoder *decoder,
+ struct pt_block *block, size_t size);
+
+/** Get the next pending event.
+ *
+ * On success, provides the next event in \@event and updates \@decoder.
+ *
+ * The \@size argument must be set to sizeof(struct pt_event).
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ *
+ * Returns -pte_bad_query if there is no event.
+ * Returns -pte_invalid if \@decoder or \@event is NULL.
+ * Returns -pte_invalid if \@size is too small.
+ */
+extern pt_export int pt_blk_event(struct pt_block_decoder *decoder,
+ struct pt_event *event, size_t size);
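+
+/* Example: a block decode sketch (illustrative; analogous to the instruction
+ * flow decode loop, with blocks instead of single instructions).
+ *
+ *   struct pt_block_decoder *decoder;
+ *   struct pt_block block;
+ *   int status;
+ *
+ *   decoder = pt_blk_alloc_decoder(&config);
+ *   status = pt_blk_sync_forward(decoder);
+ *   while (status >= 0) {
+ *     while (status & pts_event_pending) {
+ *       struct pt_event event;
+ *
+ *       status = pt_blk_event(decoder, &event, sizeof(event));
+ *       if (status < 0)
+ *         break;
+ *     }
+ *     if (status < 0)
+ *       break;
+ *
+ *     status = pt_blk_next(decoder, &block, sizeof(block));
+ *     if (status < 0)
+ *       break;
+ *     // block.ip .. block.end_ip covers block.ninsn instructions
+ *   }
+ *
+ *   pt_blk_free_decoder(decoder);
+ */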
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* INTEL_PT_H */
diff --git a/contrib/processor-trace/libipt/internal/include/posix/pt_section_posix.h b/contrib/processor-trace/libipt/internal/include/posix/pt_section_posix.h
new file mode 100644
index 0000000000000..99e85a834a3bb
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/posix/pt_section_posix.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_POSIX_H
+#define PT_SECTION_POSIX_H
+
+#include <stdint.h>
+#include <sys/stat.h>
+
+struct pt_section;
+
+
+/* Fstat-based file status. */
+struct pt_sec_posix_status {
+ /* The file status. */
+ struct stat stat;
+};
+
+/* MMAP-based section mapping information. */
+struct pt_sec_posix_mapping {
+ /* The mmap base address. */
+ uint8_t *base;
+
+ /* The mapped memory size. */
+ uint64_t size;
+
+ /* The begin and end of the mapped memory. */
+ const uint8_t *begin, *end;
+};
+
+
+/* Map a section.
+ *
+ * On success, sets @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_invalid if @section can't be mapped.
+ */
+extern int pt_sec_posix_map(struct pt_section *section, int fd);
+
+/* Unmap a section.
+ *
+ * On success, clears @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_posix_unmap(struct pt_section *section);
+
+/* Read memory from an mmaped section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_invalid if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_sec_posix_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+/* Compute the memory size of a section.
+ *
+ * On success, provides the amount of memory used for mapping @section in bytes
+ * in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @size is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_posix_memsize(const struct pt_section *section,
+ uint64_t *size);
+
+#endif /* PT_SECTION_POSIX_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_asid.h b/contrib/processor-trace/libipt/internal/include/pt_asid.h
new file mode 100644
index 0000000000000..cded0c1092d30
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_asid.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_ASID_H
+#define PT_ASID_H
+
+#include <stddef.h>
+
+struct pt_asid;
+
+
+/* Read an asid provided by the user.
+ *
+ * Translate a user-provided asid in @user into @asid. Default values are used
+ * for any fields not provided by the user, and for all fields if @user is
+ * NULL.
+ *
+ * Fields set in @user that are not known (i.e. from a newer version of this
+ * library) will be ignored.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @asid is NULL.
+ */
+extern int pt_asid_from_user(struct pt_asid *asid, const struct pt_asid *user);
+
+/* Provide an asid to the user.
+ *
+ * Translate @asid into a potentially older or newer version in @user.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @user or @asid is NULL.
+ * Returns -pte_invalid if @size is too small.
+ */
+extern int pt_asid_to_user(struct pt_asid *user, const struct pt_asid *asid,
+ size_t size);
+
+/* Match two asids.
+ *
+ * Asids match if all fields provide either default values or are identical.
+ *
+ * Returns a positive number if @lhs matches @rhs.
+ * Returns zero if @lhs does not match @rhs.
+ * Returns a negative error code otherwise.
+ *
+ * Returns -pte_internal if @lhs or @rhs are NULL.
+ */
+extern int pt_asid_match(const struct pt_asid *lhs, const struct pt_asid *rhs);
+
+#endif /* PT_ASID_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_block_cache.h b/contrib/processor-trace/libipt/internal/include/pt_block_cache.h
new file mode 100644
index 0000000000000..552fd93a7bb6c
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_block_cache.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_BLOCK_CACHE_H
+#define PT_BLOCK_CACHE_H
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* A block cache entry qualifier.
+ *
+ * This describes what to do at the decision point determined by a block cache
+ * entry.
+ */
+enum pt_bcache_qualifier {
+ /* This is not a decision point.
+ *
+ * The next decision point is too far away and one or more fields
+ * threatened to overflow, so we had to stop along the way.
+ *
+ * Apply the displacement and number of instructions and continue from
+ * the resulting IP.
+ */
+ ptbq_again,
+
+ /* The decision point is a conditional branch.
+ *
+ * This requires a conditional branch query.
+ *
+ * The isize field should provide the size of the branch instruction so
+ * only taken branches require the instruction to be decoded.
+ */
+ ptbq_cond,
+
+ /* The decision point is a near indirect call.
+ *
+ * This requires a return-address stack update and an indirect branch
+ * query.
+ *
+ * The isize field should provide the size of the call instruction so
+ * the return address can be computed by adding it to the displacement
+ * that brings us to the call instruction.
+ *
+ * No instruction decode is required.
+ */
+ ptbq_ind_call,
+
+ /* The decision point is a near return.
+ *
+ * The return may be compressed so this requires a conditional branch
+ * query to determine the compression state and either a return-address
+ * stack lookup or an indirect branch query.
+ *
+ * No instruction decode is required.
+ */
+ ptbq_return,
+
+ /* The decision point is an indirect jump or far branch.
+ *
+ * This requires an indirect branch query.
+ *
+ * No instruction decode is required.
+ */
+ ptbq_indirect,
+
+ /* The decision point requires the instruction at the decision point IP
+ * to be decoded to determine the next step.
+ *
+ * This is used for
+ *
+ * - near direct calls that need to maintain the return-address stack.
+ *
+ * - near direct jumps that are too far away to be handled with a
+ * block cache entry as they would overflow the displacement field.
+ */
+ ptbq_decode
+};
+
+/* A block cache entry.
+ *
+ * There will be one such entry per byte of decoded memory image. Each entry
+ * corresponds to an IP in the traced memory image. The cache is initialized
+ * with invalid entries for all IPs.
+ *
+ * Only entries for the first byte of each instruction will be used; other
+ * entries are ignored and will remain invalid.
+ *
+ * Each valid entry gives the distance from the entry's IP to the next decision
+ * point both in bytes and in the number of instructions.
+ */
+struct pt_bcache_entry {
+ /* The displacement to the next decision point in bytes.
+ *
+ * This is zero if we are at a decision point except for ptbq_again
+ * where it gives the displacement to the next block cache entry to be
+ * used.
+ */
+ int32_t displacement:16;
+
+ /* The number of instructions to the next decision point.
+ *
+ * This is typically one at a decision point since we are already
+ * accounting for the instruction at the decision point.
+ *
+ * Note that this field must be smaller than the respective struct
+ * pt_block field so we can fit one block cache entry into an empty
+ * block.
+ */
+ uint32_t ninsn:8;
+
+ /* The execution mode for all instructions between here and the next
+ * decision point.
+ *
+ * This is enum pt_exec_mode.
+ *
+ * This is ptem_unknown if the entry is not valid.
+ */
+ uint32_t mode:2;
+
+ /* The decision point qualifier.
+ *
+ * This is enum pt_bcache_qualifier.
+ */
+ uint32_t qualifier:3;
+
+ /* The size of the instruction at the decision point.
+ *
+ * This is zero if the size is too big to fit into the field. In this
+ * case, the instruction needs to be decoded to determine its size.
+ */
+ uint32_t isize:3;
+};
+
+/* Get the execution mode of a block cache entry. */
+static inline enum pt_exec_mode pt_bce_exec_mode(struct pt_bcache_entry bce)
+{
+ return (enum pt_exec_mode) bce.mode;
+}
+
+/* Get the block cache qualifier of a block cache entry. */
+static inline enum pt_bcache_qualifier
+pt_bce_qualifier(struct pt_bcache_entry bce)
+{
+ return (enum pt_bcache_qualifier) bce.qualifier;
+}
+
+/* Check if a block cache entry is valid. */
+static inline int pt_bce_is_valid(struct pt_bcache_entry bce)
+{
+ return pt_bce_exec_mode(bce) != ptem_unknown;
+}
+
+
+
+/* A block cache. */
+struct pt_block_cache {
+ /* The number of cache entries. */
+ uint32_t nentries;
+
+ /* A variable-length array of @nentries entries. */
+ struct pt_bcache_entry entry[];
+};
+
+/* Create a block cache.
+ *
+ * @nentries is the number of entries in the cache and should match the size of
+ * the to-be-cached section in bytes.
+ */
+extern struct pt_block_cache *pt_bcache_alloc(uint64_t nentries);
+
+/* Destroy a block cache. */
+extern void pt_bcache_free(struct pt_block_cache *bcache);
+
+/* Cache a block.
+ *
+ * It is expected that all calls for the same @index write the same @bce.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @bcache is NULL.
+ * Returns -pte_internal if @index is outside of @bcache.
+ */
+extern int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index,
+ struct pt_bcache_entry bce);
+
+/* Look up a cached block.
+ *
+ * The returned cache entry need not be valid. The caller is expected to check
+ * for validity using pt_bce_is_valid(*@bce).
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @bcache or @bce is NULL.
+ * Returns -pte_internal if @index is outside of @bcache.
+ */
+extern int pt_bcache_lookup(struct pt_bcache_entry *bce,
+ const struct pt_block_cache *bcache,
+ uint64_t index);
+
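+/* A usage sketch (illustrative only; @section_size, @ip, and @section_begin
+ * are hypothetical values describing the cached section):
+ *
+ *    struct pt_block_cache *bcache;
+ *    struct pt_bcache_entry bce;
+ *    int errcode;
+ *
+ *    bcache = pt_bcache_alloc(section_size);
+ *    if (!bcache)
+ *        return -pte_nomem;
+ *
+ *    memset(&bce, 0, sizeof(bce));
+ *    bce.ninsn = 1;
+ *    bce.mode = ptem_64bit;
+ *    bce.qualifier = ptbq_cond;
+ *
+ *    errcode = pt_bcache_add(bcache, ip - section_begin, bce);
+ *
+ *    errcode = pt_bcache_lookup(&bce, bcache, ip - section_begin);
+ *    if (!errcode && pt_bce_is_valid(bce))
+ *        ...act on pt_bce_qualifier(bce)...
+ *
+ *    pt_bcache_free(bcache);
+ */
+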
+#endif /* PT_BLOCK_CACHE_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_block_decoder.h b/contrib/processor-trace/libipt/internal/include/pt_block_decoder.h
new file mode 100644
index 0000000000000..b965be1fc5fd4
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_block_decoder.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_BLOCK_DECODER_H
+#define PT_BLOCK_DECODER_H
+
+#include "pt_query_decoder.h"
+#include "pt_image.h"
+#include "pt_retstack.h"
+#include "pt_ild.h"
+#include "pt_msec_cache.h"
+
+
+/* A block decoder.
+ *
+ * It decodes Intel(R) Processor Trace into a sequence of instruction blocks
+ * such that the instructions in each block can be decoded without further need
+ * of trace.
+ */
+struct pt_block_decoder {
+ /* The Intel(R) Processor Trace query decoder. */
+ struct pt_query_decoder query;
+
+ /* The configuration flags.
+ *
+ * These are the flags set by the user. In @query.config.flags, we set
+ * the flags we need for the query decoder.
+ */
+ struct pt_conf_flags flags;
+
+ /* The default image. */
+ struct pt_image default_image;
+
+ /* The image. */
+ struct pt_image *image;
+
+ /* The current cached section. */
+ struct pt_msec_cache scache;
+
+ /* The current address space. */
+ struct pt_asid asid;
+
+ /* The current Intel(R) Processor Trace event. */
+ struct pt_event event;
+
+ /* The call/return stack for ret compression. */
+ struct pt_retstack retstack;
+
+ /* The current instruction.
+ *
+ * This is only valid if @process_insn is set.
+ */
+ struct pt_insn insn;
+ struct pt_insn_ext iext;
+
+ /* The start IP of the next block.
+ *
+ * If tracing is disabled, this is the IP at which we assume tracing to
+ * be resumed.
+ */
+ uint64_t ip;
+
+ /* The current execution mode. */
+ enum pt_exec_mode mode;
+
+ /* The status of the last successful decoder query.
+ *
+ * Errors are reported directly; the status is always a non-negative
+ * pt_status_flag bit-vector.
+ */
+ int status;
+
+ /* A collection of flags defining how to proceed with flow reconstruction:
+ *
+ * - tracing is enabled.
+ */
+ uint32_t enabled:1;
+
+ /* - process @event. */
+ uint32_t process_event:1;
+
+ /* - instructions are executed speculatively. */
+ uint32_t speculative:1;
+
+ /* - process @insn/@iext.
+ *
+ * We have started processing events binding to @insn/@iext. The
+ * instruction has been accounted for in the previous block, but we
+ * have not yet proceeded past it.
+ *
+ * We will do so in pt_blk_event() after processing all events that
+ * bind to it.
+ */
+ uint32_t process_insn:1;
+
+ /* - a paging event has already been bound to @insn/@iext. */
+ uint32_t bound_paging:1;
+
+ /* - a vmcs event has already been bound to @insn/@iext. */
+ uint32_t bound_vmcs:1;
+
+ /* - a ptwrite event has already been bound to @insn/@iext. */
+ uint32_t bound_ptwrite:1;
+};
+
+
+/* Initialize a block decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @decoder or @config is NULL.
+ */
+extern int pt_blk_decoder_init(struct pt_block_decoder *decoder,
+ const struct pt_config *config);
+
+/* Finalize a block decoder. */
+extern void pt_blk_decoder_fini(struct pt_block_decoder *decoder);
+
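+/* A minimal lifetime sketch (illustrative; @config is assumed to be a fully
+ * initialized struct pt_config *):
+ *
+ *    struct pt_block_decoder decoder;
+ *    int errcode;
+ *
+ *    errcode = pt_blk_decoder_init(&decoder, config);
+ *    if (errcode < 0)
+ *        return errcode;
+ *
+ *    ...decode...
+ *
+ *    pt_blk_decoder_fini(&decoder);
+ */
+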
+#endif /* PT_BLOCK_DECODER_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_config.h b/contrib/processor-trace/libipt/internal/include/pt_config.h
new file mode 100644
index 0000000000000..406130efb5d78
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_config.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "intel-pt.h"
+
+
+/* Read the configuration provided by a library user and zero-initialize
+ * missing fields.
+ *
+ * We keep the user's size value if it is smaller than sizeof(*@config) to
+ * allow decoders to detect missing configuration bits.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @config is NULL.
+ * Returns -pte_invalid if @uconfig is NULL.
+ * Returns -pte_bad_config if @config is too small to be useful.
+ */
+extern int pt_config_from_user(struct pt_config *config,
+ const struct pt_config *uconfig);
+
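+/* A sketch of the expected caller side (illustrative; @trace_begin and
+ * @trace_end are hypothetical pointers delimiting the Intel PT buffer):
+ *
+ *    struct pt_config config, uconfig;
+ *    int errcode;
+ *
+ *    memset(&uconfig, 0, sizeof(uconfig));
+ *    uconfig.size = sizeof(uconfig);
+ *    uconfig.begin = trace_begin;
+ *    uconfig.end = trace_end;
+ *
+ *    errcode = pt_config_from_user(&config, &uconfig);
+ *
+ * Keeping the user's size value lets decoders detect fields that an older
+ * library user did not provide.
+ */
+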
+/* Get the configuration for the n'th address filter.
+ *
+ * Returns zero if @filter is NULL or @n is out of bounds.
+ *
+ * This corresponds to IA32_RTIT_CTL.ADDRn_CFG.
+ */
+extern uint32_t pt_filter_addr_cfg(const struct pt_conf_addr_filter *filter,
+ uint8_t n);
+
+/* Get the lower bound (inclusive) of the n'th address filter.
+ *
+ * Returns zero if @filter is NULL or @n is out of bounds.
+ *
+ * This corresponds to IA32_RTIT_ADDRn_A.
+ */
+extern uint64_t pt_filter_addr_a(const struct pt_conf_addr_filter *filter,
+ uint8_t n);
+
+/* Get the upper bound (inclusive) of the n'th address filter.
+ *
+ * Returns zero if @filter is NULL or @n is out of bounds.
+ *
+ * This corresponds to IA32_RTIT_ADDRn_B.
+ */
+extern uint64_t pt_filter_addr_b(const struct pt_conf_addr_filter *filter,
+ uint8_t n);
+
+/* Check address filters.
+ *
+ * Checks @addr against @filter.
+ *
+ * Returns a positive number if @addr lies in a tracing-enabled region.
+ * Returns zero if @addr lies in a tracing-disabled region.
+ * Returns a negative pt_error_code otherwise.
+ */
+extern int pt_filter_addr_check(const struct pt_conf_addr_filter *filter,
+ uint64_t addr);
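+
+/* A sketch of the intended use (illustrative; @filter would typically be the
+ * addr_filter member of a struct pt_config, and @ip a hypothetical address):
+ *
+ *    int status = pt_filter_addr_check(filter, ip);
+ *
+ *    if (status < 0)
+ *        return status;
+ *    if (!status)
+ *        ...tracing is disabled at @ip; expect no trace...
+ */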
diff --git a/contrib/processor-trace/libipt/internal/include/pt_cpu.h b/contrib/processor-trace/libipt/internal/include/pt_cpu.h
new file mode 100644
index 0000000000000..3ab40446083fa
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_cpu.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_CPU_H
+#define PT_CPU_H
+
+struct pt_cpu;
+
+/* Parses @s, which should be of the format family/model[/stepping], and
+ * stores the result in @cpu on success.
+ * The optional stepping defaults to 0 if omitted.
+ *
+ * Returns 0 on success.
+ * Returns -pte_invalid if @cpu or @s is NULL.
+ * Returns -pte_invalid if @s could not be parsed.
+ */
+extern int pt_cpu_parse(struct pt_cpu *cpu, const char *s);
+
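+/* For example (illustrative values):
+ *
+ *    struct pt_cpu cpu;
+ *    int errcode;
+ *
+ *    errcode = pt_cpu_parse(&cpu, "6/62/4");
+ *
+ * parses family 6, model 62, stepping 4 into @cpu.
+ */
+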
+/* Get the cpu we're running on.
+ *
+ * Reads the family/model/stepping of the processor on which this function
+ * is executed and stores the value in @cpu.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if @cpu is NULL.
+ */
+extern int pt_cpu_read(struct pt_cpu *cpu);
+
+#endif /* PT_CPU_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_cpuid.h b/contrib/processor-trace/libipt/internal/include/pt_cpuid.h
new file mode 100644
index 0000000000000..e5afabf9079bd
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_cpuid.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_CPUID_H
+#define PT_CPUID_H
+
+#include <inttypes.h>
+
+/* Execute cpuid with @leaf set in the eax register.
+ * The result is stored in @eax, @ebx, @ecx and @edx.
+ */
+extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx);
+
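+/* For example, querying the Intel PT enumeration leaf (0x14) might look like
+ * this (illustrative only):
+ *
+ *    uint32_t eax, ebx, ecx, edx;
+ *
+ *    pt_cpuid(0x14, &eax, &ebx, &ecx, &edx);
+ */
+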
+#endif /* PT_CPUID_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_decoder_function.h b/contrib/processor-trace/libipt/internal/include/pt_decoder_function.h
new file mode 100644
index 0000000000000..9bed3f29f7202
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_decoder_function.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_DECODER_FUNCTION_H
+#define PT_DECODER_FUNCTION_H
+
+#include <stdint.h>
+
+struct pt_query_decoder;
+struct pt_packet_decoder;
+struct pt_packet;
+struct pt_config;
+
+
+/* Intel(R) Processor Trace decoder function flags. */
+enum pt_decoder_function_flag {
+ /* The decoded packet contains an unconditional branch destination. */
+ pdff_tip = 1 << 0,
+
+ /* The decoded packet contains taken/not-taken conditional branch info. */
+ pdff_tnt = 1 << 1,
+
+ /* The decoded packet contains an event. */
+ pdff_event = 1 << 2,
+
+ /* The decoded packet marks the end of a PSB header. */
+ pdff_psbend = 1 << 3,
+
+ /* The decoded packet contains a non-branch IP update. */
+ pdff_fup = 1 << 4,
+
+ /* The decoded packet is unknown to the decoder. */
+ pdff_unknown = 1 << 5,
+
+ /* The decoded packet contains timing information. */
+ pdff_timing = 1 << 6,
+
+ /* The decoded packet contains padding. */
+ pdff_pad = 1 << 7
+};
+
+/* An Intel(R) Processor Trace decoder function. */
+struct pt_decoder_function {
+ /* The function to analyze the next packet. */
+ int (*packet)(struct pt_packet_decoder *, struct pt_packet *);
+
+ /* The function to decode the next packet. */
+ int (*decode)(struct pt_query_decoder *);
+
+ /* The function to decode the next packet in segment header
+ * context, i.e. between PSB and PSBEND.
+ */
+ int (*header)(struct pt_query_decoder *);
+
+ /* Decoder function flags. */
+ int flags;
+};
+
+
+/* Fetch the decoder function.
+ *
+ * Sets @dfun to the decoder function for decoding the packet at @pos.
+ *
+ * Returns 0 on success.
+ * Returns -pte_internal if @dfun or @config is NULL.
+ * Returns -pte_nosync if @pos is NULL or outside @config's trace buffer.
+ * Returns -pte_eos if the opcode is incomplete or missing.
+ */
+extern int pt_df_fetch(const struct pt_decoder_function **dfun,
+ const uint8_t *pos, const struct pt_config *config);
+
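+/* A dispatch sketch (illustrative; @decoder->pos and @decoder->config are
+ * assumed to be the query decoder's current position and configuration):
+ *
+ *    const struct pt_decoder_function *dfun;
+ *    int errcode;
+ *
+ *    errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ *    if (errcode < 0)
+ *        return errcode;
+ *
+ *    errcode = dfun->decode(decoder);
+ */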
+
+/* Decoder functions for the various packet types.
+ *
+ * Do not call those functions directly!
+ */
+extern const struct pt_decoder_function pt_decode_unknown;
+extern const struct pt_decoder_function pt_decode_pad;
+extern const struct pt_decoder_function pt_decode_psb;
+extern const struct pt_decoder_function pt_decode_tip;
+extern const struct pt_decoder_function pt_decode_tnt_8;
+extern const struct pt_decoder_function pt_decode_tnt_64;
+extern const struct pt_decoder_function pt_decode_tip_pge;
+extern const struct pt_decoder_function pt_decode_tip_pgd;
+extern const struct pt_decoder_function pt_decode_fup;
+extern const struct pt_decoder_function pt_decode_pip;
+extern const struct pt_decoder_function pt_decode_ovf;
+extern const struct pt_decoder_function pt_decode_mode;
+extern const struct pt_decoder_function pt_decode_psbend;
+extern const struct pt_decoder_function pt_decode_tsc;
+extern const struct pt_decoder_function pt_decode_cbr;
+extern const struct pt_decoder_function pt_decode_tma;
+extern const struct pt_decoder_function pt_decode_mtc;
+extern const struct pt_decoder_function pt_decode_cyc;
+extern const struct pt_decoder_function pt_decode_stop;
+extern const struct pt_decoder_function pt_decode_vmcs;
+extern const struct pt_decoder_function pt_decode_mnt;
+extern const struct pt_decoder_function pt_decode_exstop;
+extern const struct pt_decoder_function pt_decode_mwait;
+extern const struct pt_decoder_function pt_decode_pwre;
+extern const struct pt_decoder_function pt_decode_pwrx;
+extern const struct pt_decoder_function pt_decode_ptw;
+
+#endif /* PT_DECODER_FUNCTION_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_encoder.h b/contrib/processor-trace/libipt/internal/include/pt_encoder.h
new file mode 100644
index 0000000000000..9d48a34a863d2
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_encoder.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_ENCODER_H
+#define PT_ENCODER_H
+
+#include "intel-pt.h"
+
+
+/* An Intel PT packet encoder. */
+struct pt_encoder {
+ /* The encoder configuration. */
+ struct pt_config config;
+
+ /* The current position in the trace buffer. */
+ uint8_t *pos;
+};
+
+
+/* Initialize the packet encoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_encoder_init(struct pt_encoder *, const struct pt_config *);
+
+/* Finalize the packet encoder. */
+extern void pt_encoder_fini(struct pt_encoder *);
+
+
+/* The functions below are convenience wrappers around pt_enc_next(). */
+
+/* Encode a Padding (pad) packet. */
+extern int pt_encode_pad(struct pt_encoder *);
+
+/* Encode a Packet Stream Boundary (psb) packet. */
+extern int pt_encode_psb(struct pt_encoder *);
+
+/* Encode an End PSB (psbend) packet. */
+extern int pt_encode_psbend(struct pt_encoder *);
+
+/* Encode a Target Instruction Pointer (tip) packet. */
+extern int pt_encode_tip(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Taken Not Taken (tnt) packet - 8-bit version. */
+extern int pt_encode_tnt_8(struct pt_encoder *, uint8_t tnt, int size);
+
+/* Encode a Taken Not Taken (tnt) packet - 64-bit version. */
+extern int pt_encode_tnt_64(struct pt_encoder *, uint64_t tnt, int size);
+
+/* Encode a Packet Generation Enable (tip.pge) packet. */
+extern int pt_encode_tip_pge(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Packet Generation Disable (tip.pgd) packet. */
+extern int pt_encode_tip_pgd(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Flow Update Packet (fup). */
+extern int pt_encode_fup(struct pt_encoder *, uint64_t ip,
+ enum pt_ip_compression ipc);
+
+/* Encode a Paging Information Packet (pip). */
+extern int pt_encode_pip(struct pt_encoder *, uint64_t cr3, uint8_t flags);
+
+/* Encode an Overflow Packet (ovf). */
+extern int pt_encode_ovf(struct pt_encoder *);
+
+/* Encode a Mode Exec Packet (mode.exec). */
+extern int pt_encode_mode_exec(struct pt_encoder *, enum pt_exec_mode);
+
+/* Encode a Mode Tsx Packet (mode.tsx). */
+extern int pt_encode_mode_tsx(struct pt_encoder *, uint8_t);
+
+/* Encode a Time Stamp Counter (tsc) packet. */
+extern int pt_encode_tsc(struct pt_encoder *, uint64_t);
+
+/* Encode a Core Bus Ratio (cbr) packet. */
+extern int pt_encode_cbr(struct pt_encoder *, uint8_t);
+
+/* Encode a TSC/MTC Alignment (tma) packet. */
+extern int pt_encode_tma(struct pt_encoder *, uint16_t ctc,
+ uint16_t fc);
+
+/* Encode a Mini Time Counter (mtc) packet. */
+extern int pt_encode_mtc(struct pt_encoder *, uint8_t ctc);
+
+/* Encode a Cycle Count (cyc) packet. */
+extern int pt_encode_cyc(struct pt_encoder *, uint32_t cyc);
+
+/* Encode a TraceStop Packet (stop). */
+extern int pt_encode_stop(struct pt_encoder *);
+
+/* Encode a VMCS packet. */
+extern int pt_encode_vmcs(struct pt_encoder *, uint64_t);
+
+/* Encode a Maintenance (mnt) packet. */
+extern int pt_encode_mnt(struct pt_encoder *, uint64_t);
+
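+/* A sketch encoding a tiny trace stream (illustrative; @encoder is assumed to
+ * have been initialized with a writable trace buffer, and error checks are
+ * elided):
+ *
+ *    pt_encode_psb(encoder);
+ *    pt_encode_psbend(encoder);
+ *    pt_encode_tip(encoder, 0x1000ull, pt_ipc_full);
+ *    pt_encode_tnt_8(encoder, 0x2, 2);
+ */
+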
+#endif /* PT_ENCODER_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_event_queue.h b/contrib/processor-trace/libipt/internal/include/pt_event_queue.h
new file mode 100644
index 0000000000000..c606dfa397528
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_event_queue.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_EVENT_QUEUE_H
+#define PT_EVENT_QUEUE_H
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* Events are grouped by the packet the event binds to. */
+enum pt_event_binding {
+ evb_psbend,
+ evb_tip,
+ evb_fup,
+
+ evb_max
+};
+
+enum {
+ /* The maximal number of pending events - should be a power of two. */
+ evq_max = 8
+};
+
+/* A queue of events. */
+struct pt_event_queue {
+ /* A collection of event queues, one per binding. */
+ struct pt_event queue[evb_max][evq_max];
+
+ /* The begin and end indices for the above event queues. */
+ uint8_t begin[evb_max];
+ uint8_t end[evb_max];
+
+ /* A standalone event to be published immediately. */
+ struct pt_event standalone;
+};
+
+
+/* Initialize (or reset) an event queue. */
+extern void pt_evq_init(struct pt_event_queue *);
+
+/* Get a standalone event.
+ *
+ * Returns a pointer to the standalone event on success.
+ * Returns NULL if @evq is NULL.
+ */
+extern struct pt_event *pt_evq_standalone(struct pt_event_queue *evq);
+
+/* Enqueue an event.
+ *
+ * Adds a new event to @evq for binding @evb.
+ *
+ * Returns a pointer to the new event on success.
+ * Returns NULL if @evq is NULL or @evb is invalid.
+ * Returns NULL if @evq is full.
+ */
+extern struct pt_event *pt_evq_enqueue(struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+
+/* Dequeue an event.
+ *
+ * Removes the first event for binding @evb from @evq.
+ *
+ * Returns a pointer to the dequeued event on success.
+ * Returns NULL if @evq is NULL or @evb is invalid.
+ * Returns NULL if @evq is empty.
+ */
+extern struct pt_event *pt_evq_dequeue(struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
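+/* A sketch of the intended flow (illustrative only; the error code on a full
+ * queue is an assumption for this sketch):
+ *
+ *    struct pt_event *ev;
+ *
+ *    ev = pt_evq_enqueue(evq, evb_tip);
+ *    if (!ev)
+ *        return -pte_internal;
+ *
+ *    ev->type = ptev_enabled;
+ *
+ *    ...later, when the binding packet is decoded...
+ *
+ *    for (ev = pt_evq_dequeue(evq, evb_tip); ev;
+ *         ev = pt_evq_dequeue(evq, evb_tip))
+ *        ...publish *ev...
+ */
+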
+/* Clear a queue and discard events.
+ *
+ * Removes all events for binding @evb from @evq.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @evq is NULL or @evb is invalid.
+ */
+extern int pt_evq_clear(struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Check for emptiness.
+ *
+ * Check if @evq for binding @evb is empty.
+ *
+ * Returns a positive number if @evq is empty.
+ * Returns zero if @evq is not empty.
+ * Returns -pte_internal if @evq is NULL or @evb is invalid.
+ */
+extern int pt_evq_empty(const struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Check for non-emptiness.
+ *
+ * Check if @evq for binding @evb contains pending events.
+ *
+ * Returns a positive number if @evq is not empty.
+ * Returns zero if @evq is empty.
+ * Returns -pte_internal if @evq is NULL or @evb is invalid.
+ */
+extern int pt_evq_pending(const struct pt_event_queue *evq,
+ enum pt_event_binding evb);
+
+/* Find an event by type.
+ *
+ * Searches @evq for binding @evb for an event of type @evt.
+ *
+ * Returns a pointer to the first matching event on success.
+ * Returns NULL if there is no such event.
+ * Returns NULL if @evq is NULL.
+ * Returns NULL if @evb or @evt is invalid.
+ */
+extern struct pt_event *pt_evq_find(struct pt_event_queue *evq,
+ enum pt_event_binding evb,
+ enum pt_event_type evt);
+
+#endif /* PT_EVENT_QUEUE_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_ild.h b/contrib/processor-trace/libipt/internal/include/pt_ild.h
new file mode 100644
index 0000000000000..d0d0e915fb07a
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_ild.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PT_ILD_H)
+#define PT_ILD_H
+
+#include "pt_insn.h"
+
+#include "intel-pt.h"
+
+
+typedef enum {
+ PTI_MAP_0, /* 1-byte opcodes. may have modrm */
+ PTI_MAP_1, /* 2-byte opcodes (0x0f). may have modrm */
+ PTI_MAP_2, /* 3-byte opcodes (0x0f38). has modrm */
+ PTI_MAP_3, /* 3-byte opcodes (0x0f3a). has modrm */
+ PTI_MAP_AMD3DNOW, /* 3d-now opcodes (0x0f0f). has modrm */
+ PTI_MAP_INVALID
+} pti_map_enum_t;
+
+struct pt_ild {
+ /* inputs */
+ uint8_t const *itext;
+ uint8_t max_bytes; /* 1..15 bytes */
+ enum pt_exec_mode mode;
+
+ union {
+ struct {
+ uint32_t osz:1;
+ uint32_t asz:1;
+ uint32_t lock:1;
+ uint32_t f3:1;
+ uint32_t f2:1;
+ uint32_t last_f2f3:2; /* 2 or 3 */
+ /* The vex bit is set for c4/c5 VEX and EVEX. */
+ uint32_t vex:1;
+ /* The REX.R and REX.W bits in REX, VEX, or EVEX. */
+ uint32_t rex_r:1;
+ uint32_t rex_w:1;
+ } s;
+ uint32_t i;
+ } u;
+ uint8_t imm1_bytes; /* # of bytes in 1st immediate */
+ uint8_t imm2_bytes; /* # of bytes in 2nd immediate */
+ uint8_t disp_bytes; /* # of displacement bytes */
+ uint8_t modrm_byte;
+ /* 5 bits, but valid values are 0..3; could be in the bit union. */
+ uint8_t map;
+ uint8_t rex; /* 0b0100wrxb */
+ uint8_t nominal_opcode;
+ uint8_t disp_pos;
+ /* imm_pos can be derived from disp_pos + disp_bytes. */
+};
+
+static inline pti_map_enum_t pti_get_map(const struct pt_ild *ild)
+{
+ return (pti_map_enum_t) ild->map;
+}
+
+static inline uint8_t pti_get_modrm_mod(const struct pt_ild *ild)
+{
+ return ild->modrm_byte >> 6;
+}
+
+static inline uint8_t pti_get_modrm_reg(const struct pt_ild *ild)
+{
+ return (ild->modrm_byte >> 3) & 7;
+}
+
+static inline uint8_t pti_get_modrm_rm(const struct pt_ild *ild)
+{
+ return ild->modrm_byte & 7;
+}
+
+/* MAIN ENTRY POINTS */
+
+/* One-time initialization. Not thread-safe; call while still single-threaded. */
+extern void pt_ild_init(void);
+
+/* All decoding is multithread-safe. */
+
+/* Decode one instruction.
+ *
+ * Input:
+ *
+ * @insn->ip: the virtual address of the instruction
+ * @insn->raw: the memory at that virtual address
+ * @insn->size: the maximal size of the instruction
+ * @insn->mode: the execution mode
+ *
+ * Output:
+ *
+ * @insn->size: the actual size of the instruction
+ * @insn->iclass: a coarse classification
+ *
+ * @iext->iclass: a finer grain classification
+ * @iext->variant: instruction class dependent information
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext);
+
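+/* A decode sketch (illustrative; @bytes, @nbytes, and @addr are hypothetical
+ * values describing instruction memory):
+ *
+ *    struct pt_insn insn;
+ *    struct pt_insn_ext iext;
+ *    int errcode;
+ *
+ *    memset(&insn, 0, sizeof(insn));
+ *    memset(&iext, 0, sizeof(iext));
+ *    insn.ip = addr;
+ *    insn.mode = ptem_64bit;
+ *    insn.size = nbytes;
+ *    memcpy(insn.raw, bytes, nbytes);
+ *
+ *    errcode = pt_ild_decode(&insn, &iext);
+ */
+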
+#endif /* PT_ILD_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_image.h b/contrib/processor-trace/libipt/internal/include/pt_image.h
new file mode 100644
index 0000000000000..dbc2186bea18b
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_image.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_IMAGE_H
+#define PT_IMAGE_H
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+#include <stdint.h>
+
+
+/* A list of sections. */
+struct pt_section_list {
+ /* The next list element. */
+ struct pt_section_list *next;
+
+ /* The mapped section. */
+ struct pt_mapped_section section;
+
+ /* The image section identifier. */
+ int isid;
+};
+
+/* A traced image consisting of a collection of sections. */
+struct pt_image {
+ /* The optional image name. */
+ char *name;
+
+ /* The list of sections. */
+ struct pt_section_list *sections;
+
+ /* An optional read memory callback. */
+ struct {
+ /* The callback function. */
+ read_memory_callback_t *callback;
+
+ /* The callback context. */
+ void *context;
+ } readmem;
+};
+
+/* Initialize an image with an optional @name. */
+extern void pt_image_init(struct pt_image *image, const char *name);
+
+/* Finalize an image.
+ *
+ * This removes all sections and frees the name.
+ */
+extern void pt_image_fini(struct pt_image *image);
+
+/* Add a section to an image.
+ *
+ * Add @section identified by @isid to @image at @vaddr in @asid. If @section
+ * overlaps with existing sections, the existing sections are shrunk, split, or
+ * removed to accommodate @section. Absence of a section identifier is indicated
+ * by an @isid of zero.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @image, @section, or @asid is NULL.
+ */
+extern int pt_image_add(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr, int isid);
+
+/* Remove a section from an image.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @image, @section, or @asid is NULL.
+ * Returns -pte_bad_image if @image does not contain @section at @vaddr.
+ */
+extern int pt_image_remove(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr);
+
+/* Read memory from an image.
+ *
+ * Reads at most @size bytes from @image at @addr in @asid into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_internal if @image, @isid, @buffer, or @asid is NULL.
+ * Returns -pte_nomap if the section does not contain @addr.
+ */
+extern int pt_image_read(struct pt_image *image, int *isid, uint8_t *buffer,
+ uint16_t size, const struct pt_asid *asid,
+ uint64_t addr);
+
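+/* A read sketch (illustrative; @image is assumed to be populated, @asid
+ * filled in via pt_asid_from_user(), and @ip a hypothetical address):
+ *
+ *    uint8_t buffer[16];
+ *    int isid, status;
+ *
+ *    status = pt_image_read(image, &isid, buffer, sizeof(buffer), &asid, ip);
+ *    if (status < 0)
+ *        return status;
+ *
+ * On success, @status is the number of bytes actually read, which may be less
+ * than sizeof(buffer) near a section boundary.
+ */
+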
+/* Find an image section.
+ *
+ * Find the section containing @vaddr in @asid and provide it in @msec. On
+ * success, takes a reference of @msec->section that the caller needs to put
+ * after use.
+ *
+ * Returns the section's identifier on success, a negative error code otherwise.
+ * Returns -pte_internal if @image, @msec, or @asid is NULL.
+ * Returns -pte_nomap if there is no such section in @image.
+ */
+extern int pt_image_find(struct pt_image *image, struct pt_mapped_section *msec,
+ const struct pt_asid *asid, uint64_t vaddr);
+
+/* Validate an image section.
+ *
+ * Validate that a lookup of @vaddr in @msec->asid in @image would result in
+ * @msec identified by @isid.
+ *
+ * Validation is a cheap, best-effort check; it may fail sporadically even for
+ * a valid mapping, in which case the caller should fall back to a full lookup.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_invalid if @image or @msec is NULL.
+ * Returns -pte_nomap if validation failed.
+ */
+extern int pt_image_validate(const struct pt_image *image,
+ const struct pt_mapped_section *msec,
+ uint64_t vaddr, int isid);
+
+#endif /* PT_IMAGE_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_image_section_cache.h b/contrib/processor-trace/libipt/internal/include/pt_image_section_cache.h
new file mode 100644
index 0000000000000..1e7f0d358af2a
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_image_section_cache.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_IMAGE_SECTION_CACHE_H
+#define PT_IMAGE_SECTION_CACHE_H
+
+#include <stdint.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+struct pt_section;
+
+
+/* An image section cache entry. */
+struct pt_iscache_entry {
+ /* The section object.
+ *
+ * We hold a reference to the section - put it when the section is
+ * removed from the cache.
+ */
+ struct pt_section *section;
+
+ /* The base address at which @section has been loaded. */
+ uint64_t laddr;
+};
+
+/* An image section cache least recently used cache entry. */
+struct pt_iscache_lru_entry {
+ /* The next entry in a list ordered by recent use. */
+ struct pt_iscache_lru_entry *next;
+
+ /* The section mapped by the image section cache. */
+ struct pt_section *section;
+
+ /* The amount of memory used by mapping @section in bytes. */
+ uint64_t size;
+};
+
+/* A cache of image sections and their load addresses.
+ *
+ * We combine the section with its load address to reduce the amount of
+ * information we need to store in order to read from a cached section by
+ * virtual address.
+ *
+ * Internally, the section object will be shared if it is loaded at different
+ * addresses in the cache.
+ *
+ * The cache does not consider the address-space the section is mapped into.
+ * This is not relevant for reading from the section.
+ */
+struct pt_image_section_cache {
+ /* The optional name of the cache; NULL if not named. */
+ char *name;
+
+ /* An array of @nentries cached sections. */
+ struct pt_iscache_entry *entries;
+
+ /* A list of mapped sections ordered by time of last access. */
+ struct pt_iscache_lru_entry *lru;
+
+ /* The memory limit for our LRU cache. */
+ uint64_t limit;
+
+ /* The current size of our LRU cache. */
+ uint64_t used;
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting this image section cache. */
+ mtx_t lock;
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The capacity of the @entries array.
+ *
+ * Cached sections are identified by a positive integer, the image
+ * section identifier (isid), which is derived from their index into the
+ * @entries array.
+ *
+ * We can't expand the section cache capacity beyond INT_MAX.
+ */
+ uint16_t capacity;
+
+ /* The current size of the cache in number of entries.
+ *
+ * This is smaller than @capacity if there is still room in the @entries
+ * array; equal to @capacity if the @entries array is full and needs to
+ * be reallocated.
+ */
+ uint16_t size;
+};
+
+
+/* Initialize an image section cache. */
+extern int pt_iscache_init(struct pt_image_section_cache *iscache,
+ const char *name);
+
+/* Finalize an image section cache. */
+extern void pt_iscache_fini(struct pt_image_section_cache *iscache);
+
+/* Add a section to the cache.
+ *
+ * Adds @section at @laddr to @iscache and returns its isid. If a similar
+ * section is already cached, returns that section's isid instead.
+ *
+ * We take a full section rather than its filename and range in that file to
+ * avoid a dependency on pt_section.h. Callers are expected to query the
+ * cache before creating the section, so we should only see unnecessary section
+ * creation/destruction on insertion races.
+ *
+ * Returns a positive isid on success, a negative error code otherwise.
+ * Returns -pte_internal if @iscache or @section is NULL.
+ * Returns -pte_internal if @section's filename is NULL.
+ */
+extern int pt_iscache_add(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t laddr);
+
+/* Find a section in the cache.
+ *
+ * Returns a positive isid if a section matching @filename, @offset, @size
+ * loaded at @laddr is found in @iscache.
+ * Returns zero if no such section is found.
+ * Returns a negative error code otherwise.
+ * Returns -pte_internal if @iscache or @filename is NULL.
+ */
+extern int pt_iscache_find(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t laddr);
+
+/* Lookup the section identified by its isid.
+ *
+ * Provides a reference to the section in @section and its load address in
+ * @laddr on success. The caller is expected to put the returned section after
+ * use.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @iscache, @section, or @laddr is NULL.
+ * Returns -pte_bad_image if @iscache does not contain @isid.
+ */
+extern int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr,
+ int isid);
+
+/* Clear an image section cache. */
+extern int pt_iscache_clear(struct pt_image_section_cache *iscache);
+
+/* Notify about the mapping of a cached section.
+ *
+ * Notifies @iscache that @section has been mapped.
+ *
+ * The caller guarantees that @iscache contains @section (by using @section's
+ * iscache pointer) and prevents @iscache from detaching.
+ *
+ * The caller must not lock @section to allow @iscache to map it. This function
+ * must not try to detach from @section.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_internal if @iscache or @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section);
+
+/* Notify about a size change of a mapped section.
+ *
+ * Notifies @iscache that @section's size has changed while it was mapped.
+ *
+ * The caller guarantees that @iscache contains @section (by using @section's
+ * iscache pointer) and prevents @iscache from detaching.
+ *
+ * The caller must not lock @section to allow @iscache to map it. This function
+ * must not try to detach from @section.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_internal if @iscache or @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t size);
+
+#endif /* PT_IMAGE_SECTION_CACHE_H */
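[Editor's note: the query-before-insert protocol described in the pt_iscache_add() comment looks like this at a call site. A minimal sketch; pt_mk_section() stands in for whatever constructor creates a section from a file range and is an assumption, not part of this header.]

static int cache_file_section(struct pt_image_section_cache *iscache,
			      const char *filename, uint64_t offset,
			      uint64_t size, uint64_t laddr)
{
	struct pt_section *section;
	int isid;

	/* Query the cache first to avoid creating a section that is
	 * already cached.
	 */
	isid = pt_iscache_find(iscache, filename, offset, size, laddr);
	if (isid != 0)
		return isid;	/* A positive isid or a negative error. */

	/* Not cached.  Create the section and add it; on an insertion
	 * race, pt_iscache_add() returns the existing section's isid.
	 */
	section = pt_mk_section(filename, offset, size);	/* assumed */
	if (!section)
		return -pte_invalid;

	return pt_iscache_add(iscache, section, laddr);
}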
diff --git a/contrib/processor-trace/libipt/internal/include/pt_insn.h b/contrib/processor-trace/libipt/internal/include/pt_insn.h
new file mode 100644
index 0000000000000..22039827daafb
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_insn.h
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_INSN_H
+#define PT_INSN_H
+
+#include <inttypes.h>
+
+#include "intel-pt.h"
+
+struct pt_insn_ext;
+
+
+/* A finer-grain classification of instructions used internally. */
+typedef enum {
+ PTI_INST_INVALID,
+
+ PTI_INST_CALL_9A,
+ PTI_INST_CALL_FFr3,
+ PTI_INST_CALL_FFr2,
+ PTI_INST_CALL_E8,
+ PTI_INST_INT,
+
+ PTI_INST_INT3,
+ PTI_INST_INT1,
+ PTI_INST_INTO,
+ PTI_INST_IRET, /* includes IRETD and IRETQ (EOSZ determines) */
+
+ PTI_INST_JMP_E9,
+ PTI_INST_JMP_EB,
+ PTI_INST_JMP_EA,
+ PTI_INST_JMP_FFr5, /* REXW? */
+ PTI_INST_JMP_FFr4,
+ PTI_INST_JCC,
+ PTI_INST_JrCXZ,
+ PTI_INST_LOOP,
+ PTI_INST_LOOPE, /* aka Z */
+ PTI_INST_LOOPNE, /* aka NE */
+
+ PTI_INST_MOV_CR3,
+
+ PTI_INST_RET_C3,
+ PTI_INST_RET_C2,
+ PTI_INST_RET_CB,
+ PTI_INST_RET_CA,
+
+ PTI_INST_SYSCALL,
+ PTI_INST_SYSENTER,
+ PTI_INST_SYSEXIT,
+ PTI_INST_SYSRET,
+
+ PTI_INST_VMLAUNCH,
+ PTI_INST_VMRESUME,
+ PTI_INST_VMCALL,
+ PTI_INST_VMPTRLD,
+
+ PTI_INST_PTWRITE,
+
+ PTI_INST_LAST
+} pti_inst_enum_t;
+
+/* Information about an instruction we need internally in addition to the
+ * information provided in struct pt_insn.
+ */
+struct pt_insn_ext {
+ /* A more detailed instruction class. */
+ pti_inst_enum_t iclass;
+
+ /* Instruction-specific information. */
+ union {
+ /* For branch instructions. */
+ struct {
+ /* The branch displacement.
+ *
+ * This is only valid for direct calls/jumps.
+ *
+ * The displacement is applied to the address of the
+ * instruction following the branch.
+ */
+ int32_t displacement;
+
+ /* A flag saying whether the branch is direct.
+ *
+ * non-zero: direct
+ * zero: indirect
+ *
+ * This is expected to go away someday when we extend
+ * enum pt_insn_class to distinguish direct and indirect
+ * branches.
+ */
+ uint8_t is_direct;
+ } branch;
+ } variant;
+};
+
+
+/* Check if the instruction @insn/@iext changes the current privilege level.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_changes_cpl(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext changes CR3.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_changes_cr3(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a (near or far) branch.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a far branch.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_far_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext binds to a PIP packet.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_binds_to_pip(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext binds to a VMCS packet.
+ *
+ * Returns non-zero if it does, zero if it doesn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_binds_to_vmcs(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Check if the instruction @insn/@iext is a ptwrite instruction.
+ *
+ * Returns non-zero if it is, zero if it isn't (or @insn/@iext is NULL).
+ */
+extern int pt_insn_is_ptwrite(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Determine the IP of the next instruction.
+ *
+ * Tries to determine the IP of the next instruction without using trace and
+ * provides it in @ip unless @ip is NULL.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_query if the IP can't be determined.
+ * Returns -pte_internal if @insn or @iext is NULL.
+ */
+extern int pt_insn_next_ip(uint64_t *ip, const struct pt_insn *insn,
+ const struct pt_insn_ext *iext);
+
+/* Decode and analyze one instruction.
+ *
+ * Decodes the instruction at @insn->ip in @insn->mode into @insn and @iext.
+ *
+ * If the instruction cannot be decoded using a single memory read in a single
+ * section, sets @insn->truncated and reads the missing bytes from one or more
+ * other sections until either the instruction can be decoded or we're sure it
+ * is invalid.
+ *
+ * Returns the size in bytes on success, a negative error code otherwise.
+ * Returns -pte_bad_insn if the instruction could not be decoded.
+ */
+extern int pt_insn_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+ struct pt_image *image, const struct pt_asid *asid);
+
+/* Determine if a range of instructions is contiguous.
+ *
+ * Try to proceed from IP @begin to IP @end in @asid without using trace.
+ *
+ * Returns a positive integer if we reach @end from @begin.
+ * Returns zero if we couldn't reach @end within @nsteps steps.
+ * Returns a negative error code otherwise.
+ */
+extern int pt_insn_range_is_contiguous(uint64_t begin, uint64_t end,
+ enum pt_exec_mode mode,
+ struct pt_image *image,
+ const struct pt_asid *asid,
+ size_t nsteps);
+
+#endif /* PT_INSN_H */
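[Editor's note: the displacement semantics documented in struct pt_insn_ext are enough to compute a direct branch target. A minimal sketch, assuming struct pt_insn provides the instruction's address and length as @ip and @size, as declared in intel-pt.h.]

static int direct_branch_target(uint64_t *target,
				const struct pt_insn *insn,
				const struct pt_insn_ext *iext)
{
	if (!target || !insn || !iext)
		return -pte_internal;

	if (!iext->variant.branch.is_direct)
		return -pte_bad_query;

	/* The displacement is applied to the address of the instruction
	 * following the branch.
	 */
	*target = insn->ip + insn->size +
		(uint64_t) (int64_t) iext->variant.branch.displacement;

	return 0;
}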
diff --git a/contrib/processor-trace/libipt/internal/include/pt_insn_decoder.h b/contrib/processor-trace/libipt/internal/include/pt_insn_decoder.h
new file mode 100644
index 0000000000000..70b47f33e491e
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_insn_decoder.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_INSN_DECODER_H
+#define PT_INSN_DECODER_H
+
+#include "pt_query_decoder.h"
+#include "pt_image.h"
+#include "pt_retstack.h"
+#include "pt_ild.h"
+#include "pt_msec_cache.h"
+
+#include <inttypes.h>
+
+
+struct pt_insn_decoder {
+ /* The Intel(R) Processor Trace query decoder. */
+ struct pt_query_decoder query;
+
+ /* The configuration flags.
+ *
+	 * These are the flags set by the user. In @query.config.flags, we set
+ * the flags we need for the query decoder.
+ */
+ struct pt_conf_flags flags;
+
+ /* The default image. */
+ struct pt_image default_image;
+
+ /* The image. */
+ struct pt_image *image;
+
+ /* The current cached section. */
+ struct pt_msec_cache scache;
+
+ /* The current address space. */
+ struct pt_asid asid;
+
+ /* The current Intel(R) Processor Trace event. */
+ struct pt_event event;
+
+ /* The call/return stack for ret compression. */
+ struct pt_retstack retstack;
+
+ /* The current instruction.
+ *
+ * This is only valid if @process_insn is set.
+ */
+ struct pt_insn insn;
+ struct pt_insn_ext iext;
+
+ /* The current IP.
+ *
+ * If tracing is disabled, this is the IP at which we assume tracing to
+ * be resumed.
+ */
+ uint64_t ip;
+
+ /* The current execution mode. */
+ enum pt_exec_mode mode;
+
+ /* The status of the last successful decoder query.
+ *
+ * Errors are reported directly; the status is always a non-negative
+ * pt_status_flag bit-vector.
+ */
+ int status;
+
+	/* A collection of flags defining how to proceed with flow
+	 * reconstruction:
+ *
+ * - tracing is enabled.
+ */
+ uint32_t enabled:1;
+
+ /* - process @event. */
+ uint32_t process_event:1;
+
+ /* - instructions are executed speculatively. */
+ uint32_t speculative:1;
+
+ /* - process @insn/@iext.
+ *
+ * We have started processing events binding to @insn/@iext. We have
+ * not yet proceeded past it.
+ *
+ * We will do so in pt_insn_event() after processing all events that
+ * bind to it.
+ */
+ uint32_t process_insn:1;
+
+ /* - a paging event has already been bound to @insn/@iext. */
+ uint32_t bound_paging:1;
+
+ /* - a vmcs event has already been bound to @insn/@iext. */
+ uint32_t bound_vmcs:1;
+
+ /* - a ptwrite event has already been bound to @insn/@iext. */
+ uint32_t bound_ptwrite:1;
+};
+
+
+/* Initialize an instruction flow decoder.
+ *
+ * Returns zero on success; a negative error code otherwise.
+ * Returns -pte_internal if @decoder is NULL.
+ * Returns -pte_invalid if @config is NULL.
+ */
+extern int pt_insn_decoder_init(struct pt_insn_decoder *decoder,
+ const struct pt_config *config);
+
+/* Finalize an instruction flow decoder. */
+extern void pt_insn_decoder_fini(struct pt_insn_decoder *decoder);
+
+#endif /* PT_INSN_DECODER_H */
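[Editor's note: the init/fini pair implies the usual embed-or-stack-allocate lifecycle; a short sketch, assuming @config is a filled-in struct pt_config prepared via the public API.]

	struct pt_insn_decoder decoder;
	int errcode;

	errcode = pt_insn_decoder_init(&decoder, &config);
	if (errcode < 0)
		return errcode;	/* -pte_internal or -pte_invalid. */

	/* ... reconstruct the instruction flow ... */

	pt_insn_decoder_fini(&decoder);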
diff --git a/contrib/processor-trace/libipt/internal/include/pt_last_ip.h b/contrib/processor-trace/libipt/internal/include/pt_last_ip.h
new file mode 100644
index 0000000000000..0f4490db2b607
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_last_ip.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_LAST_IP_H
+#define PT_LAST_IP_H
+
+#include <stdint.h>
+
+struct pt_packet_ip;
+struct pt_config;
+
+
+/* Keeping track of the last-ip in Intel PT packets. */
+struct pt_last_ip {
+ /* The last IP. */
+ uint64_t ip;
+
+ /* Flags governing the handling of IP updates and queries:
+ *
+ * - we have seen an IP update.
+ */
+ uint32_t have_ip:1;
+ /* - the IP has been suppressed in the last update. */
+ uint32_t suppressed:1;
+};
+
+
+/* Initialize (or reset) the last-ip. */
+extern void pt_last_ip_init(struct pt_last_ip *last_ip);
+
+/* Query the last-ip.
+ *
+ * If @ip is not NULL, provides the last-ip in @ip on success.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @last_ip is NULL.
+ * Returns -pte_noip if there is no last-ip.
+ * Returns -pte_ip_suppressed if the last-ip has been suppressed.
+ */
+extern int pt_last_ip_query(uint64_t *ip, const struct pt_last_ip *last_ip);
+
+/* Update last-ip.
+ *
+ * Updates @last_ip based on @packet and, if non-null, @config.
+ *
+ * Returns zero on success.
+ * Returns -pte_internal if @last_ip or @packet is NULL.
+ * Returns -pte_bad_packet if @packet appears to be corrupted.
+ */
+extern int pt_last_ip_update_ip(struct pt_last_ip *last_ip,
+ const struct pt_packet_ip *packet,
+ const struct pt_config *config);
+
+#endif /* PT_LAST_IP_H */
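[Editor's note: typical use pairs one update per IP-bearing packet with a query once the address is needed. A sketch, assuming the decoded packet stores its IP payload in @packet.payload.ip as in the public struct pt_packet.]

	struct pt_last_ip last_ip;
	uint64_t ip;
	int errcode;

	pt_last_ip_init(&last_ip);

	/* Fold each decoded IP packet into the tracker. */
	errcode = pt_last_ip_update_ip(&last_ip, &packet.payload.ip, &config);
	if (errcode < 0)
		return errcode;

	/* Query the reconstructed address; this fails with -pte_noip or
	 * -pte_ip_suppressed as documented above.
	 */
	errcode = pt_last_ip_query(&ip, &last_ip);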
diff --git a/contrib/processor-trace/libipt/internal/include/pt_mapped_section.h b/contrib/processor-trace/libipt/internal/include/pt_mapped_section.h
new file mode 100644
index 0000000000000..7e1016111f887
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_mapped_section.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_MAPPED_SECTION_H
+#define PT_MAPPED_SECTION_H
+
+#include "intel-pt.h"
+#include "pt_section.h"
+
+#include <stdint.h>
+
+
+/* A section mapped into memory. */
+struct pt_mapped_section {
+ /* The section that is mapped. */
+ struct pt_section *section;
+
+ /* The address space into which the section is mapped. */
+ struct pt_asid asid;
+
+ /* The virtual address at which the section is mapped. */
+ uint64_t vaddr;
+
+ /* The offset into the section.
+ *
+ * This is normally zero but when @section is split, @offset is added to
+ * the section/file offset when accessing @section.
+ */
+ uint64_t offset;
+
+ /* The size of the section.
+ *
+ * This is normally @section->size but when @section is split, this is
+ * used to determine the size of the sub-section.
+ */
+ uint64_t size;
+};
+
+
+static inline void pt_msec_init(struct pt_mapped_section *msec,
+ struct pt_section *section,
+ const struct pt_asid *asid,
+ uint64_t vaddr, uint64_t offset, uint64_t size)
+{
+ if (!msec)
+ return;
+
+ msec->section = section;
+ msec->vaddr = vaddr;
+ msec->offset = offset;
+ msec->size = size;
+
+ if (asid)
+ msec->asid = *asid;
+ else
+ pt_asid_init(&msec->asid);
+}
+
+/* Destroy a mapped section - does not free @msec->section. */
+static inline void pt_msec_fini(struct pt_mapped_section *msec)
+{
+ (void) msec;
+
+ /* Nothing to do. */
+}
+
+/* Return the virtual address of the beginning of the memory region. */
+static inline uint64_t pt_msec_begin(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->vaddr;
+}
+
+/* Return the virtual address one byte past the end of the memory region. */
+static inline uint64_t pt_msec_end(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->vaddr + msec->size;
+}
+
+/* Return the section/file offset. */
+static inline uint64_t pt_msec_offset(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->offset;
+}
+
+/* Return the section size. */
+static inline uint64_t pt_msec_size(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return 0ull;
+
+ return msec->size;
+}
+
+/* Return the underlying section. */
+static inline struct pt_section *
+pt_msec_section(const struct pt_mapped_section *msec)
+{
+ return msec->section;
+}
+
+/* Return an identifier for the address-space the section is mapped into. */
+static inline const struct pt_asid *
+pt_msec_asid(const struct pt_mapped_section *msec)
+{
+ if (!msec)
+ return NULL;
+
+ return &msec->asid;
+}
+
+/* Translate a section/file offset into a virtual address. */
+static inline uint64_t pt_msec_map(const struct pt_mapped_section *msec,
+ uint64_t offset)
+{
+ return (offset - msec->offset) + msec->vaddr;
+}
+
+/* Translate a virtual address into a section/file offset. */
+static inline uint64_t pt_msec_unmap(const struct pt_mapped_section *msec,
+ uint64_t vaddr)
+{
+ return (vaddr - msec->vaddr) + msec->offset;
+}
+
+/* Read memory from a mapped section.
+ *
+ * The caller must check @msec->asid.
+ * The caller must ensure that @msec->section is mapped.
+ *
+ * Returns the number of bytes read on success.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_msec_read(const struct pt_mapped_section *msec,
+ uint8_t *buffer, uint16_t size,
+ uint64_t vaddr)
+{
+ struct pt_section *section;
+ uint64_t begin, end, mbegin, mend, offset;
+
+ if (!msec)
+ return -pte_internal;
+
+	begin = vaddr;
+	end = begin + size;
+	if (end < begin)
+		/* The read would wrap around the address space; clamp it. */
+		end = UINT64_MAX;
+
+	mbegin = pt_msec_begin(msec);
+	mend = pt_msec_end(msec);
+
+	/* Reject reads that start outside of the mapped section. */
+	if (begin < mbegin || mend <= begin)
+		return -pte_nomap;
+
+	/* Truncate reads that extend past the end of the section. */
+	if (mend < end)
+		end = mend;
+
+	size = (uint16_t) (end - begin);
+
+ section = pt_msec_section(msec);
+ offset = pt_msec_unmap(msec, begin);
+
+ return pt_section_read(section, buffer, size, offset);
+}
+
+#endif /* PT_MAPPED_SECTION_H */
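[Editor's note: pt_msec_map() and pt_msec_unmap() are inverses for addresses inside the mapping; a quick check with made-up numbers. A NULL section suffices for pure address arithmetic.]

	struct pt_mapped_section msec;
	struct pt_asid asid;
	uint64_t offset, vaddr;

	pt_asid_init(&asid);
	pt_msec_init(&msec, NULL, &asid, 0x401000ull, 0x1000ull, 0x2000ull);

	offset = pt_msec_unmap(&msec, 0x401080ull);	/* 0x1080 */
	vaddr = pt_msec_map(&msec, offset);		/* 0x401080 again */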
diff --git a/contrib/processor-trace/libipt/internal/include/pt_msec_cache.h b/contrib/processor-trace/libipt/internal/include/pt_msec_cache.h
new file mode 100644
index 0000000000000..43f3813bfbd7c
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_msec_cache.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_MSEC_CACHE_H
+#define PT_MSEC_CACHE_H
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+
+/* A single-entry mapped section cache.
+ *
+ * The cached section is implicitly mapped and unmapped. The cache is not
+ * thread-safe.
+ */
+struct pt_msec_cache {
+ /* The cached section.
+ *
+ * The cache is valid if and only if @msec.section is not NULL.
+ *
+	 * It needs to be unmapped and put. Use pt_msec_cache_invalidate() to
+	 * release the cached section and to invalidate the cache.
+ */
+ struct pt_mapped_section msec;
+
+ /* The section identifier. */
+ int isid;
+};
+
+/* Initialize the cache. */
+extern int pt_msec_cache_init(struct pt_msec_cache *cache);
+
+/* Finalize the cache. */
+extern void pt_msec_cache_fini(struct pt_msec_cache *cache);
+
+/* Invalidate the cache. */
+extern int pt_msec_cache_invalidate(struct pt_msec_cache *cache);
+
+/* Read the cached section.
+ *
+ * If @cache is not empty and @image would find the cached section when
+ * looking up @vaddr in @*pmsec->asid, provide a pointer to that section in
+ * @pmsec and return its image section identifier.
+ *
+ * The provided pointer remains valid until @cache is invalidated.
+ *
+ * Returns @*pmsec's isid on success, a negative pt_error_code otherwise.
+ */
+extern int pt_msec_cache_read(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image, uint64_t vaddr);
+
+/* Fill the cache.
+ *
+ * Looks up @vaddr in @asid in @image, caches the found section, provides it
+ * in @pmsec, and returns its image section identifier.
+ *
+ * Invalidates @cache.
+ *
+ * The provided pointer remains valid until @cache is invalidated.
+ *
+ * Returns @*pmsec's isid on success, a negative pt_error_code otherwise.
+ */
+extern int pt_msec_cache_fill(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image,
+ const struct pt_asid *asid, uint64_t vaddr);
+
+#endif /* PT_MSEC_CACHE_H */
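[Editor's note: the read/fill pair suggests a lookup helper that tries the cache first and falls back to the image. A sketch; the assumption that a plain miss is reported as -pte_nomap is ours, not the header's.]

static int find_msec(struct pt_msec_cache *cache,
		     const struct pt_mapped_section **pmsec,
		     struct pt_image *image,
		     const struct pt_asid *asid, uint64_t vaddr)
{
	int isid;

	/* Try the single-entry cache first. */
	isid = pt_msec_cache_read(cache, pmsec, image, vaddr);
	if (isid < 0) {
		/* Pass through real errors; assume -pte_nomap means a
		 * plain cache miss.
		 */
		if (isid != -pte_nomap)
			return isid;

		isid = pt_msec_cache_fill(cache, pmsec, image, asid, vaddr);
	}

	return isid;
}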
diff --git a/contrib/processor-trace/libipt/internal/include/pt_opcodes.h b/contrib/processor-trace/libipt/internal/include/pt_opcodes.h
new file mode 100644
index 0000000000000..93eab79a80ec2
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_opcodes.h
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_OPCODES_H
+#define PT_OPCODES_H
+
+
+/* A one byte opcode. */
+enum pt_opcode {
+ pt_opc_pad = 0x00,
+ pt_opc_ext = 0x02,
+ pt_opc_psb = pt_opc_ext,
+ pt_opc_tip = 0x0d,
+ pt_opc_tnt_8 = 0x00,
+ pt_opc_tip_pge = 0x11,
+ pt_opc_tip_pgd = 0x01,
+ pt_opc_fup = 0x1d,
+ pt_opc_mode = 0x99,
+ pt_opc_tsc = 0x19,
+ pt_opc_mtc = 0x59,
+ pt_opc_cyc = 0x03,
+
+ /* A free opcode to trigger a decode fault. */
+ pt_opc_bad = 0xd9
+};
+
+/* A one byte extension code for ext opcodes. */
+enum pt_ext_code {
+ pt_ext_psb = 0x82,
+ pt_ext_tnt_64 = 0xa3,
+ pt_ext_pip = 0x43,
+ pt_ext_ovf = 0xf3,
+ pt_ext_psbend = 0x23,
+ pt_ext_cbr = 0x03,
+ pt_ext_tma = 0x73,
+ pt_ext_stop = 0x83,
+ pt_ext_vmcs = 0xc8,
+ pt_ext_ext2 = 0xc3,
+ pt_ext_exstop = 0x62,
+ pt_ext_exstop_ip = 0xe2,
+ pt_ext_mwait = 0xc2,
+ pt_ext_pwre = 0x22,
+ pt_ext_pwrx = 0xa2,
+ pt_ext_ptw = 0x12,
+
+ pt_ext_bad = 0x04
+};
+
+/* A one byte extension 2 code for ext2 extension opcodes. */
+enum pt_ext2_code {
+ pt_ext2_mnt = 0x88,
+
+ pt_ext2_bad = 0x00
+};
+
+/* A one byte opcode mask. */
+enum pt_opcode_mask {
+ pt_opm_tip = 0x1f,
+ pt_opm_tnt_8 = 0x01,
+ pt_opm_tnt_8_shr = 1,
+ pt_opm_fup = pt_opm_tip,
+
+ /* The bit mask for the compression bits in the opcode. */
+ pt_opm_ipc = 0xe0,
+
+ /* The shift right value for ipc bits. */
+ pt_opm_ipc_shr = 5,
+
+ /* The bit mask for the compression bits after shifting. */
+ pt_opm_ipc_shr_mask = 0x7,
+
+ /* Shift counts and masks for decoding the cyc packet. */
+ pt_opm_cyc = 0x03,
+ pt_opm_cyc_ext = 0x04,
+ pt_opm_cyc_bits = 0xf8,
+ pt_opm_cyc_shr = 3,
+ pt_opm_cycx_ext = 0x01,
+ pt_opm_cycx_shr = 1,
+
+ /* The bit mask for the IP bit in the exstop packet. */
+ pt_opm_exstop_ip = 0x80,
+
+ /* The PTW opcode. */
+ pt_opm_ptw = 0x1f,
+
+ /* The bit mask for the IP bit in the ptw packet. */
+ pt_opm_ptw_ip = 0x80,
+
+ /* The bit mask and shr value for the payload bytes field in ptw. */
+ pt_opm_ptw_pb = 0x60,
+ pt_opm_ptw_pb_shr = 5,
+
+ /* The bit mask for the payload bytes field in ptw after shifting. */
+ pt_opm_ptw_pb_shr_mask = 0x3
+};
+
+/* The size of the various opcodes in bytes. */
+enum pt_opcode_size {
+ pt_opcs_pad = 1,
+ pt_opcs_tip = 1,
+ pt_opcs_tip_pge = 1,
+ pt_opcs_tip_pgd = 1,
+ pt_opcs_fup = 1,
+ pt_opcs_tnt_8 = 1,
+ pt_opcs_mode = 1,
+ pt_opcs_tsc = 1,
+ pt_opcs_mtc = 1,
+ pt_opcs_cyc = 1,
+ pt_opcs_psb = 2,
+ pt_opcs_psbend = 2,
+ pt_opcs_ovf = 2,
+ pt_opcs_pip = 2,
+ pt_opcs_tnt_64 = 2,
+ pt_opcs_cbr = 2,
+ pt_opcs_tma = 2,
+ pt_opcs_stop = 2,
+ pt_opcs_vmcs = 2,
+ pt_opcs_mnt = 3,
+ pt_opcs_exstop = 2,
+ pt_opcs_mwait = 2,
+ pt_opcs_pwre = 2,
+ pt_opcs_pwrx = 2,
+ pt_opcs_ptw = 2
+};
+
+/* The psb magic payload.
+ *
+ * The payload is a repeating 2-byte pattern.
+ */
+enum pt_psb_pattern {
+ /* The high and low bytes in the pattern. */
+ pt_psb_hi = pt_opc_psb,
+ pt_psb_lo = pt_ext_psb,
+
+ /* Various combinations of the above parts. */
+ pt_psb_lohi = pt_psb_lo | pt_psb_hi << 8,
+ pt_psb_hilo = pt_psb_hi | pt_psb_lo << 8,
+
+ /* The repeat count of the payload, not including opc and ext. */
+ pt_psb_repeat_count = 7,
+
+ /* The size of the repeated pattern in bytes. */
+ pt_psb_repeat_size = 2
+};
+
+/* The payload details. */
+enum pt_payload {
+ /* The shift counts for post-processing the PIP payload. */
+ pt_pl_pip_shr = 1,
+ pt_pl_pip_shl = 5,
+
+ /* The size of a PIP payload in bytes. */
+ pt_pl_pip_size = 6,
+
+ /* The non-root bit in the first byte of the PIP payload. */
+ pt_pl_pip_nr = 0x01,
+
+	/* The size of an 8bit TNT packet's payload in bits. */
+ pt_pl_tnt_8_bits = 8 - pt_opm_tnt_8_shr,
+
+ /* The size of a 64bit TNT packet's payload in bytes. */
+ pt_pl_tnt_64_size = 6,
+
+ /* The size of a 64bit TNT packet's payload in bits. */
+ pt_pl_tnt_64_bits = 48,
+
+ /* The size of a TSC packet's payload in bytes and in bits. */
+ pt_pl_tsc_size = 7,
+ pt_pl_tsc_bit_size = pt_pl_tsc_size * 8,
+
+ /* The size of a CBR packet's payload in bytes. */
+ pt_pl_cbr_size = 2,
+
+ /* The size of a PSB packet's payload in bytes. */
+ pt_pl_psb_size = pt_psb_repeat_count * pt_psb_repeat_size,
+
+ /* The size of a MODE packet's payload in bytes. */
+ pt_pl_mode_size = 1,
+
+ /* The size of an IP packet's payload with update-16 compression. */
+ pt_pl_ip_upd16_size = 2,
+
+ /* The size of an IP packet's payload with update-32 compression. */
+ pt_pl_ip_upd32_size = 4,
+
+ /* The size of an IP packet's payload with update-48 compression. */
+ pt_pl_ip_upd48_size = 6,
+
+ /* The size of an IP packet's payload with sext-48 compression. */
+ pt_pl_ip_sext48_size = 6,
+
+ /* The size of an IP packet's payload with full-ip compression. */
+ pt_pl_ip_full_size = 8,
+
+ /* Byte locations, sizes, and masks for processing TMA packets. */
+ pt_pl_tma_size = 5,
+ pt_pl_tma_ctc_size = 2,
+ pt_pl_tma_ctc_bit_size = pt_pl_tma_ctc_size * 8,
+ pt_pl_tma_ctc_0 = 2,
+ pt_pl_tma_ctc_1 = 3,
+ pt_pl_tma_ctc_mask = (1 << pt_pl_tma_ctc_bit_size) - 1,
+ pt_pl_tma_fc_size = 2,
+ pt_pl_tma_fc_bit_size = 9,
+ pt_pl_tma_fc_0 = 5,
+ pt_pl_tma_fc_1 = 6,
+ pt_pl_tma_fc_mask = (1 << pt_pl_tma_fc_bit_size) - 1,
+
+ /* The size of a MTC packet's payload in bytes and in bits. */
+ pt_pl_mtc_size = 1,
+ pt_pl_mtc_bit_size = pt_pl_mtc_size * 8,
+
+ /* A mask for the MTC payload bits. */
+ pt_pl_mtc_mask = (1 << pt_pl_mtc_bit_size) - 1,
+
+ /* The maximal payload size in bytes of a CYC packet. */
+ pt_pl_cyc_max_size = 15,
+
+ /* The size of a VMCS packet's payload in bytes. */
+ pt_pl_vmcs_size = 5,
+
+ /* The shift counts for post-processing the VMCS payload. */
+ pt_pl_vmcs_shl = 12,
+
+ /* The size of a MNT packet's payload in bytes. */
+ pt_pl_mnt_size = 8,
+
+ /* The bit-mask for the IP bit in the EXSTOP opcode extension. */
+ pt_pl_exstop_ip_mask = 0x80,
+
+ /* The size of the hints field in the MWAIT payload in bytes. */
+ pt_pl_mwait_hints_size = 4,
+
+ /* The size of the extensions field in the MWAIT payload in bytes. */
+ pt_pl_mwait_ext_size = 4,
+
+ /* The size of the MWAIT payload in bytes. */
+ pt_pl_mwait_size = pt_pl_mwait_hints_size + pt_pl_mwait_ext_size,
+
+ /* The size of the PWRE payload in bytes. */
+ pt_pl_pwre_size = 2,
+
+ /* The bit-mask for the h/w bit in the PWRE payload. */
+ pt_pl_pwre_hw_mask = 0x8,
+
+ /* The bit-mask for the resolved thread sub C-state in the PWRE
+ * payload.
+ */
+ pt_pl_pwre_sub_state_mask = 0xf00,
+
+ /* The shift right value for the resolved thread sub C-state in the
+ * PWRE payload.
+ */
+ pt_pl_pwre_sub_state_shr = 8,
+
+ /* The bit-mask for the resolved thread C-state in the PWRE payload. */
+ pt_pl_pwre_state_mask = 0xf000,
+
+ /* The shift right value for the resolved thread C-state in the
+ * PWRE payload.
+ */
+ pt_pl_pwre_state_shr = 12,
+
+ /* The size of the PWRX payload in bytes. */
+ pt_pl_pwrx_size = 5,
+
+ /* The bit-mask for the deepest core C-state in the PWRX payload. */
+ pt_pl_pwrx_deepest_mask = 0xf,
+
+ /* The shift right value for the deepest core C-state in the PWRX
+ * payload.
+ */
+ pt_pl_pwrx_deepest_shr = 0,
+
+ /* The bit-mask for the last core C-state in the PWRX payload. */
+ pt_pl_pwrx_last_mask = 0xf0,
+
+ /* The shift right value for the last core C-state in the PWRX
+ * payload.
+ */
+ pt_pl_pwrx_last_shr = 4,
+
+ /* The bit-mask for the wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_mask = 0xf00,
+
+ /* The shift right value for the wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_shr = 8,
+
+ /* The bit-mask for the interrupt wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_int = 0x100,
+
+ /* The bit-mask for the store wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_store = 0x400,
+
+ /* The bit-mask for the autonomous wake reason in the PWRX payload. */
+ pt_pl_pwrx_wr_hw = 0x800
+};
+
+/* Mode packet masks. */
+enum pt_mode_mask {
+ pt_mom_leaf = 0xe0,
+ pt_mom_leaf_shr = 5,
+ pt_mom_bits = 0x1f
+};
+
+/* Mode packet bits. */
+enum pt_mode_bit {
+ /* mode.exec */
+ pt_mob_exec_csl = 0x01,
+ pt_mob_exec_csd = 0x02,
+
+ /* mode.tsx */
+ pt_mob_tsx_intx = 0x01,
+ pt_mob_tsx_abrt = 0x02
+};
+
+/* The size of the various packets in bytes. */
+enum pt_packet_size {
+ ptps_pad = pt_opcs_pad,
+ ptps_tnt_8 = pt_opcs_tnt_8,
+ ptps_mode = pt_opcs_mode + pt_pl_mode_size,
+ ptps_tsc = pt_opcs_tsc + pt_pl_tsc_size,
+ ptps_mtc = pt_opcs_mtc + pt_pl_mtc_size,
+ ptps_psb = pt_opcs_psb + pt_pl_psb_size,
+ ptps_psbend = pt_opcs_psbend,
+ ptps_ovf = pt_opcs_ovf,
+ ptps_pip = pt_opcs_pip + pt_pl_pip_size,
+ ptps_tnt_64 = pt_opcs_tnt_64 + pt_pl_tnt_64_size,
+ ptps_cbr = pt_opcs_cbr + pt_pl_cbr_size,
+ ptps_tip_supp = pt_opcs_tip,
+ ptps_tip_upd16 = pt_opcs_tip + pt_pl_ip_upd16_size,
+ ptps_tip_upd32 = pt_opcs_tip + pt_pl_ip_upd32_size,
+ ptps_tip_upd48 = pt_opcs_tip + pt_pl_ip_upd48_size,
+ ptps_tip_sext48 = pt_opcs_tip + pt_pl_ip_sext48_size,
+ ptps_tip_full = pt_opcs_tip + pt_pl_ip_full_size,
+ ptps_tip_pge_supp = pt_opcs_tip_pge,
+ ptps_tip_pge_upd16 = pt_opcs_tip_pge + pt_pl_ip_upd16_size,
+ ptps_tip_pge_upd32 = pt_opcs_tip_pge + pt_pl_ip_upd32_size,
+ ptps_tip_pge_upd48 = pt_opcs_tip_pge + pt_pl_ip_upd48_size,
+ ptps_tip_pge_sext48 = pt_opcs_tip_pge + pt_pl_ip_sext48_size,
+ ptps_tip_pge_full = pt_opcs_tip_pge + pt_pl_ip_full_size,
+ ptps_tip_pgd_supp = pt_opcs_tip_pgd,
+ ptps_tip_pgd_upd16 = pt_opcs_tip_pgd + pt_pl_ip_upd16_size,
+ ptps_tip_pgd_upd32 = pt_opcs_tip_pgd + pt_pl_ip_upd32_size,
+ ptps_tip_pgd_upd48 = pt_opcs_tip_pgd + pt_pl_ip_upd48_size,
+ ptps_tip_pgd_sext48 = pt_opcs_tip_pgd + pt_pl_ip_sext48_size,
+ ptps_tip_pgd_full = pt_opcs_tip_pgd + pt_pl_ip_full_size,
+ ptps_fup_supp = pt_opcs_fup,
+ ptps_fup_upd16 = pt_opcs_fup + pt_pl_ip_upd16_size,
+ ptps_fup_upd32 = pt_opcs_fup + pt_pl_ip_upd32_size,
+ ptps_fup_upd48 = pt_opcs_fup + pt_pl_ip_upd48_size,
+ ptps_fup_sext48 = pt_opcs_fup + pt_pl_ip_sext48_size,
+ ptps_fup_full = pt_opcs_fup + pt_pl_ip_full_size,
+ ptps_tma = pt_opcs_tma + pt_pl_tma_size,
+ ptps_stop = pt_opcs_stop,
+ ptps_vmcs = pt_opcs_vmcs + pt_pl_vmcs_size,
+ ptps_mnt = pt_opcs_mnt + pt_pl_mnt_size,
+ ptps_exstop = pt_opcs_exstop,
+ ptps_mwait = pt_opcs_mwait + pt_pl_mwait_size,
+ ptps_pwre = pt_opcs_pwre + pt_pl_pwre_size,
+ ptps_pwrx = pt_opcs_pwrx + pt_pl_pwrx_size,
+ ptps_ptw_32 = pt_opcs_ptw + 4,
+ ptps_ptw_64 = pt_opcs_ptw + 8
+};
+
+/* Supported address range configurations. */
+enum pt_addr_cfg {
+ pt_addr_cfg_disabled = 0,
+ pt_addr_cfg_filter = 1,
+ pt_addr_cfg_stop = 2
+};
+
+#endif /* PT_OPCODES_H */
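[Editor's note: as an example of using these masks, the IP compression bits that the IP-bearing packets (TIP, TIP.PGE, TIP.PGD, FUP) carry in their opcode byte are extracted with pt_opm_ipc and pt_opm_ipc_shr.]

/* Extract the IP compression (ipc) bits from an IP packet's opcode. */
static inline uint8_t pt_opc_ipc_bits(uint8_t opc)
{
	return (uint8_t) ((opc & pt_opm_ipc) >> pt_opm_ipc_shr);
}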
diff --git a/contrib/processor-trace/libipt/internal/include/pt_packet.h b/contrib/processor-trace/libipt/internal/include/pt_packet.h
new file mode 100644
index 0000000000000..ed4fc63c4600c
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_packet.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_PACKET_H
+#define PT_PACKET_H
+
+#include <stdint.h>
+
+struct pt_config;
+struct pt_packet;
+struct pt_packet_ip;
+struct pt_packet_tnt;
+struct pt_packet_pip;
+struct pt_packet_mode;
+struct pt_packet_tsc;
+struct pt_packet_cbr;
+struct pt_packet_tma;
+struct pt_packet_mtc;
+struct pt_packet_cyc;
+struct pt_packet_vmcs;
+struct pt_packet_mnt;
+struct pt_packet_exstop;
+struct pt_packet_mwait;
+struct pt_packet_pwre;
+struct pt_packet_pwrx;
+struct pt_packet_ptw;
+
+
+/* Read the payload of an Intel PT packet.
+ *
+ * Reads the payload of the packet starting at @pos into @packet.
+ *
+ * For pt_pkt_read_psb(), the @packet parameter is omitted; the function
+ * validates that the payload matches the expected PSB pattern.
+ *
+ * Decoding an unknown packet uses @config's decode callback. If the callback
+ * is NULL, pt_pkt_read_unknown() returns -pte_bad_opc.
+ *
+ * Beware that the packet opcode is not checked. The caller is responsible
+ * for checking the opcode and calling the correct packet read function.
+ *
+ * Returns the packet size on success, a negative error code otherwise.
+ * Returns -pte_bad_packet if the packet payload is corrupt.
+ * Returns -pte_eos if the packet does not fit into the trace buffer.
+ * Returns -pte_internal if @packet, @pos, or @config is NULL.
+ */
+extern int pt_pkt_read_unknown(struct pt_packet *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_psb(const uint8_t *pos, const struct pt_config *config);
+extern int pt_pkt_read_ip(struct pt_packet_ip *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tnt_8(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tnt_64(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_pip(struct pt_packet_pip *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mode(struct pt_packet_mode *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_cbr(struct pt_packet_cbr *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_tma(struct pt_packet_tma *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mtc(struct pt_packet_mtc *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_cyc(struct pt_packet_cyc *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_vmcs(struct pt_packet_vmcs *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mnt(struct pt_packet_mnt *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_exstop(struct pt_packet_exstop *packet,
+ const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_mwait(struct pt_packet_mwait *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_pwre(struct pt_packet_pwre *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_pwrx(struct pt_packet_pwrx *packet, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_pkt_read_ptw(struct pt_packet_ptw *packet, const uint8_t *pos,
+ const struct pt_config *config);
+
+#endif /* PT_PACKET_H */
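[Editor's note: since these functions do not check the opcode themselves, the caller dispatches on it first. A sketch showing two of the single-byte cases, using the opcodes from pt_opcodes.h; the @payload.tsc and @payload.mtc members are assumed from the public struct pt_packet.]

static int read_one_packet(struct pt_packet *packet, const uint8_t *pos,
			   const struct pt_config *config)
{
	/* A full dispatcher would also set @packet->type and handle the
	 * two-byte ext opcodes.
	 */
	switch (pos[0]) {
	case pt_opc_tsc:
		return pt_pkt_read_tsc(&packet->payload.tsc, pos, config);

	case pt_opc_mtc:
		return pt_pkt_read_mtc(&packet->payload.mtc, pos, config);

	default:
		return pt_pkt_read_unknown(packet, pos, config);
	}
}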
diff --git a/contrib/processor-trace/libipt/internal/include/pt_packet_decoder.h b/contrib/processor-trace/libipt/internal/include/pt_packet_decoder.h
new file mode 100644
index 0000000000000..2c114310c84b0
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_packet_decoder.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_PACKET_DECODER_H
+#define PT_PACKET_DECODER_H
+
+#include "intel-pt.h"
+
+
+/* An Intel PT packet decoder. */
+struct pt_packet_decoder {
+ /* The decoder configuration. */
+ struct pt_config config;
+
+ /* The current position in the trace buffer. */
+ const uint8_t *pos;
+
+ /* The position of the last PSB packet. */
+ const uint8_t *sync;
+};
+
+
+/* Initialize the packet decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_pkt_decoder_init(struct pt_packet_decoder *,
+ const struct pt_config *);
+
+/* Finalize the packet decoder. */
+extern void pt_pkt_decoder_fini(struct pt_packet_decoder *);
+
+
+/* Decoder functions for the packet decoder. */
+extern int pt_pkt_decode_unknown(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_pad(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_psb(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tip(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tnt_8(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tnt_64(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_tip_pge(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_fup(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_pip(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_ovf(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mode(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_psbend(struct pt_packet_decoder *,
+ struct pt_packet *);
+extern int pt_pkt_decode_tsc(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_cbr(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_tma(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mtc(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_cyc(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_stop(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_vmcs(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mnt(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_exstop(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_mwait(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_pwre(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_pwrx(struct pt_packet_decoder *, struct pt_packet *);
+extern int pt_pkt_decode_ptw(struct pt_packet_decoder *, struct pt_packet *);
+
+#endif /* PT_PACKET_DECODER_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_query_decoder.h b/contrib/processor-trace/libipt/internal/include/pt_query_decoder.h
new file mode 100644
index 0000000000000..355338feab1cb
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_query_decoder.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_QUERY_DECODER_H
+#define PT_QUERY_DECODER_H
+
+#include "pt_last_ip.h"
+#include "pt_tnt_cache.h"
+#include "pt_time.h"
+#include "pt_event_queue.h"
+
+#include "intel-pt.h"
+
+struct pt_decoder_function;
+
+
+/* An Intel PT query decoder. */
+struct pt_query_decoder {
+ /* The decoder configuration. */
+ struct pt_config config;
+
+ /* The current position in the trace buffer. */
+ const uint8_t *pos;
+
+ /* The position of the last PSB packet. */
+ const uint8_t *sync;
+
+ /* The decoding function for the next packet. */
+ const struct pt_decoder_function *next;
+
+ /* The last-ip. */
+ struct pt_last_ip ip;
+
+ /* The cached tnt indicators. */
+ struct pt_tnt_cache tnt;
+
+ /* Timing information. */
+ struct pt_time time;
+
+ /* The time at the last query (before reading ahead). */
+ struct pt_time last_time;
+
+ /* Timing calibration. */
+ struct pt_time_cal tcal;
+
+ /* Pending (incomplete) events. */
+ struct pt_event_queue evq;
+
+ /* The current event. */
+ struct pt_event *event;
+
+ /* A collection of flags relevant for decoding:
+ *
+ * - tracing is enabled.
+ */
+ uint32_t enabled:1;
+
+ /* - consume the current packet. */
+ uint32_t consume_packet:1;
+};
+
+/* Initialize the query decoder.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_qry_decoder_init(struct pt_query_decoder *,
+ const struct pt_config *);
+
+/* Finalize the query decoder. */
+extern void pt_qry_decoder_fini(struct pt_query_decoder *);
+
+/* Decoder functions (tracing context). */
+extern int pt_qry_decode_unknown(struct pt_query_decoder *);
+extern int pt_qry_decode_pad(struct pt_query_decoder *);
+extern int pt_qry_decode_psb(struct pt_query_decoder *);
+extern int pt_qry_decode_tip(struct pt_query_decoder *);
+extern int pt_qry_decode_tnt_8(struct pt_query_decoder *);
+extern int pt_qry_decode_tnt_64(struct pt_query_decoder *);
+extern int pt_qry_decode_tip_pge(struct pt_query_decoder *);
+extern int pt_qry_decode_tip_pgd(struct pt_query_decoder *);
+extern int pt_qry_decode_fup(struct pt_query_decoder *);
+extern int pt_qry_decode_pip(struct pt_query_decoder *);
+extern int pt_qry_decode_ovf(struct pt_query_decoder *);
+extern int pt_qry_decode_mode(struct pt_query_decoder *);
+extern int pt_qry_decode_psbend(struct pt_query_decoder *);
+extern int pt_qry_decode_tsc(struct pt_query_decoder *);
+extern int pt_qry_header_tsc(struct pt_query_decoder *);
+extern int pt_qry_decode_cbr(struct pt_query_decoder *);
+extern int pt_qry_header_cbr(struct pt_query_decoder *);
+extern int pt_qry_decode_tma(struct pt_query_decoder *);
+extern int pt_qry_decode_mtc(struct pt_query_decoder *);
+extern int pt_qry_decode_cyc(struct pt_query_decoder *);
+extern int pt_qry_decode_stop(struct pt_query_decoder *);
+extern int pt_qry_decode_vmcs(struct pt_query_decoder *);
+extern int pt_qry_decode_mnt(struct pt_query_decoder *);
+extern int pt_qry_decode_exstop(struct pt_query_decoder *);
+extern int pt_qry_decode_mwait(struct pt_query_decoder *);
+extern int pt_qry_decode_pwre(struct pt_query_decoder *);
+extern int pt_qry_decode_pwrx(struct pt_query_decoder *);
+extern int pt_qry_decode_ptw(struct pt_query_decoder *);
+
+/* Decoder functions (header context). */
+extern int pt_qry_header_fup(struct pt_query_decoder *);
+extern int pt_qry_header_pip(struct pt_query_decoder *);
+extern int pt_qry_header_mode(struct pt_query_decoder *);
+extern int pt_qry_header_vmcs(struct pt_query_decoder *);
+extern int pt_qry_header_mnt(struct pt_query_decoder *);
+
+#endif /* PT_QUERY_DECODER_H */
diff --git a/contrib/processor-trace/libipt/internal/include/pt_retstack.h b/contrib/processor-trace/libipt/internal/include/pt_retstack.h
new file mode 100644
index 0000000000000..c68a782a7dbe2
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_retstack.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_RETSTACK_H
+#define PT_RETSTACK_H
+
+#include <stdint.h>
+
+
+/* The size of the call/return stack in number of entries. */
+enum {
+ pt_retstack_size = 64
+};
+
+/* A stack of return addresses used for return compression. */
+struct pt_retstack {
+ /* The stack of return addresses.
+ *
+ * We use one additional entry in order to distinguish a full from
+ * an empty stack.
+ */
+ uint64_t stack[pt_retstack_size + 1];
+
+ /* The top of the stack. */
+ uint8_t top;
+
+ /* The bottom of the stack. */
+ uint8_t bottom;
+};
+
+/* Initialize (or reset) a call/return stack. */
+extern void pt_retstack_init(struct pt_retstack *);
+
+/* Test a call/return stack for emptiness.
+ *
+ * Returns zero if @retstack contains at least one element.
+ * Returns a positive integer if @retstack is empty.
+ * Returns -pte_invalid if @retstack is NULL.
+ */
+extern int pt_retstack_is_empty(const struct pt_retstack *retstack);
+
+/* Pop and return the topmost IP.
+ *
+ * If @ip is not NULL, provides the topmost return address on success.
+ * If @retstack is not empty, pops the topmost return address on success.
+ *
+ * Returns zero on success.
+ * Returns -pte_invalid if @retstack is NULL.
+ * Returns -pte_noip if @retstack is empty.
+ */
+extern int pt_retstack_pop(struct pt_retstack *retstack, uint64_t *ip);
+
+/* Push a return address onto the stack.
+ *
+ * Pushes @ip onto @retstack.
+ * If @retstack is full, drops the oldest return address.
+ *
+ * Returns zero on success.
+ */
+extern int pt_retstack_push(struct pt_retstack *retstack, uint64_t ip);
+
+#endif /* PT_RETSTACK_H */
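
To make the ring-buffer semantics above concrete, here is a minimal usage
sketch. It relies only on the declarations in pt_retstack.h and the pte_*
error codes from intel-pt.h; the addresses are made-up illustration values.

	#include "pt_retstack.h"
	#include "intel-pt.h"

	#include <inttypes.h>
	#include <stdio.h>

	static void trace_call_return(void)
	{
		struct pt_retstack retstack;
		uint64_t ip;
		int status;

		pt_retstack_init(&retstack);

		/* On a compressed CALL, push the return address. */
		(void) pt_retstack_push(&retstack, 0x1000ull);
		(void) pt_retstack_push(&retstack, 0x2000ull);

		/* On a compressed RET, pop the youngest return address. */
		status = pt_retstack_pop(&retstack, &ip);
		if (!status)
			printf("return to 0x%" PRIx64 "\n", ip); /* 0x2000 */

		/* Popping an empty stack yields -pte_noip. */
		(void) pt_retstack_pop(&retstack, &ip);
		status = pt_retstack_pop(&retstack, &ip);
		if (status == -pte_noip)
			printf("stack exhausted\n");
	}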
diff --git a/contrib/processor-trace/libipt/internal/include/pt_section.h b/contrib/processor-trace/libipt/internal/include/pt_section.h
new file mode 100644
index 0000000000000..df9200e197703
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_section.h
@@ -0,0 +1,392 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_H
+#define PT_SECTION_H
+
+#include <stdint.h>
+#include <stddef.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+#include "intel-pt.h"
+
+struct pt_block_cache;
+
+
+/* A section of contiguous memory loaded from a file. */
+struct pt_section {
+ /* The name of the file. */
+ char *filename;
+
+ /* The offset into the file. */
+ uint64_t offset;
+
+ /* The (adjusted) size in bytes. The size is truncated to match the
+ * actual file size.
+ */
+ uint64_t size;
+
+ /* A pointer to OS-specific file status for detecting changes.
+ *
+ * The status is initialized on first pt_section_map() and will be
+ * left in the section until the section is destroyed. This field
+ * is owned by the OS-specific mmap-based section implementation.
+ */
+ void *status;
+
+ /* A pointer to implementation-specific mapping information - NULL if
+ * the section is currently not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ void *mapping;
+
+ /* A pointer to an optional block cache.
+ *
+ * The cache is created on request and destroyed implicitly when the
+ * section is unmapped.
+ *
+ * We read this field without locking and only lock the section in order
+ * to install the block cache.
+ *
+ * We rely on guaranteed atomic operations as specified in section 8.1.1
+ * in Volume 3A of the Intel(R) Software Developer's Manual at
+ * http://www.intel.com/sdm.
+ */
+ struct pt_block_cache *bcache;
+
+ /* A pointer to the iscache attached to this section.
+ *
+ * The pointer is initialized when the iscache attaches and cleared when
+ * it detaches again. There can be at most one iscache attached to this
+ * section at any time.
+ *
+ * In addition to attaching, the iscache will need to obtain a reference
+ * to the section, which it needs to drop again after detaching.
+ */
+ struct pt_image_section_cache *iscache;
+
+ /* A pointer to the unmap function - NULL if the section is currently
+ * not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ int (*unmap)(struct pt_section *sec);
+
+ /* A pointer to the read function - NULL if the section is currently
+ * not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ int (*read)(const struct pt_section *sec, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+ /* A pointer to the memsize function - NULL if the section is currently
+ * not mapped.
+ *
+ * This field is set in pt_section_map() and owned by the mapping
+ * implementation.
+ */
+ int (*memsize)(const struct pt_section *section, uint64_t *size);
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting this section.
+ *
+ * Most operations do not require the section to be locked. All
+ * actual locking should be handled by pt_section_* functions.
+ */
+ mtx_t lock;
+
+ /* A lock protecting the @iscache and @acount fields.
+ *
+ * We need separate locks to protect against a deadlock scenario when
+ * the iscache is mapping or unmapping this section.
+ *
+ * The attach lock must not be taken while holding the section lock; the
+ * other way round is OK.
+ */
+ mtx_t alock;
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The number of current users. The last user destroys the section. */
+ uint16_t ucount;
+
+ /* The number of attaches. This must be <= @ucount. */
+ uint16_t acount;
+
+	/* The number of current mappers. The last one unmaps the section. */
+ uint16_t mcount;
+};
+
+/* Create a section.
+ *
+ * The returned section describes the contents of @file starting at @offset
+ * for @size bytes.
+ *
+ * If @file is shorter than the requested @size, the section is silently
+ * truncated to the size of @file.
+ *
+ * If @offset lies beyond the end of @file, no section is created.
+ *
+ * The returned section is not mapped and starts with a user count of one and
+ * instruction caching enabled.
+ *
+ * Returns a new section on success, NULL otherwise.
+ */
+extern struct pt_section *pt_mk_section(const char *file, uint64_t offset,
+ uint64_t size);
+
+/* Lock a section.
+ *
+ * Locks @section. The section must not be locked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_lock(struct pt_section *section);
+
+/* Unlock a section.
+ *
+ * Unlocks @section. The section must be locked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_unlock(struct pt_section *section);
+
+/* Add another user.
+ *
+ * Increments the user count of @section.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_overflow if the user count would overflow.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_get(struct pt_section *section);
+
+/* Remove a user.
+ *
+ * Decrements the user count of @section. Destroys the section if the
+ * count reaches zero.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if the user count is already zero.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_put(struct pt_section *section);
+
+/* Attach the image section cache user.
+ *
+ * Similar to pt_section_get() but sets @section->iscache to @iscache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @iscache is NULL.
+ * Returns -pte_internal if a different cache is already attached.
+ * Returns -pte_overflow if the attach count would overflow.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+
+/* Detach the image section cache user.
+ *
+ * Similar to pt_section_put() but clears @section->iscache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @iscache is NULL.
+ * Returns -pte_internal if the attach count is already zero.
+ * Returns -pte_internal if @section->iscache is not equal to @iscache.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+
+/* Return the filename of @section. */
+extern const char *pt_section_filename(const struct pt_section *section);
+
+/* Return the offset of the section in bytes. */
+extern uint64_t pt_section_offset(const struct pt_section *section);
+
+/* Return the size of the section in bytes. */
+extern uint64_t pt_section_size(const struct pt_section *section);
+
+/* Return the amount of memory currently used by the section in bytes.
+ *
+ * We only consider the amount of memory required for mapping @section; we
+ * ignore the size of the section object itself and the size of the status
+ * object.
+ *
+ * If @section is currently not mapped, the size is zero.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ * Returns -pte_internal if @size or @section is NULL.
+ */
+extern int pt_section_memsize(struct pt_section *section, uint64_t *size);
+
+/* Allocate a block cache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_nomem if the block cache can't be allocated.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_alloc_bcache(struct pt_section *section);
+
+/* Request block caching.
+ *
+ * The caller must ensure that @section is mapped.
+ */
+static inline int pt_section_request_bcache(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ if (section->bcache)
+ return 0;
+
+ return pt_section_alloc_bcache(section);
+}
+
+/* Return @section's block cache, if available.
+ *
+ * The caller must ensure that @section is mapped.
+ *
+ * The cache is not use-counted. It is only valid as long as the caller keeps
+ * @section mapped.
+ */
+static inline struct pt_block_cache *
+pt_section_bcache(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->bcache;
+}
+
+/* Create the OS-specific file status.
+ *
+ * On success, allocates a status object, provides a pointer to it in @pstatus
+ * and provides the file size in @psize.
+ *
+ * The status object will be free()'ed when its section is destroyed.
+ *
+ * This function is implemented in the OS-specific section implementation.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @pstatus, @psize, or @filename is NULL.
+ * Returns -pte_bad_image if @filename can't be opened.
+ * Returns -pte_nomem if the status object can't be allocated.
+ */
+extern int pt_section_mk_status(void **pstatus, uint64_t *psize,
+ const char *filename);
+
+/* Perform on-map maintenance work.
+ *
+ * Notifies an attached image section cache about the mapping of @section.
+ *
+ * This function is called by the OS-specific pt_section_map() implementation
+ * after @section has been successfully mapped and @section has been unlocked.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_on_map_lock(struct pt_section *section);
+
+static inline int pt_section_on_map(struct pt_section *section)
+{
+ if (section && !section->iscache)
+ return 0;
+
+ return pt_section_on_map_lock(section);
+}
+
+/* Map a section.
+ *
+ * Maps @section into memory. Mappings are use-counted. The number of
+ * pt_section_map() calls must match the number of pt_section_unmap()
+ * calls.
+ *
+ * This function is implemented in the OS-specific section implementation.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_bad_image if @section changed or can't be opened.
+ * Returns -pte_bad_lock on any locking error.
+ * Returns -pte_nomem if @section can't be mapped into memory.
+ * Returns -pte_overflow if the map count would overflow.
+ */
+extern int pt_section_map(struct pt_section *section);
+
+/* Share a section mapping.
+ *
+ * Increases the map count for @section without notifying an attached image
+ * section cache.
+ *
+ * This function should only be used by the attached image section cache to
+ * resolve a deadlock scenario when mapping a section it intends to cache.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section->mcount is zero.
+ * Returns -pte_bad_lock on any locking error.
+ */
+extern int pt_section_map_share(struct pt_section *section);
+
+/* Unmap a section.
+ *
+ * Unmaps @section from memory.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_bad_lock on any locking error.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_section_unmap(struct pt_section *section);
+
+/* Read memory from a section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer. @section
+ * must be mapped.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+#endif /* PT_SECTION_H */
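
A minimal sketch of the use-counting and mapping discipline described above,
using only functions declared in this header; the file name is a hypothetical
argument supplied by the caller.

	#include "pt_section.h"
	#include "intel-pt.h"

	#include <stdio.h>

	static int dump_section_head(const char *filename)
	{
		struct pt_section *section;
		uint8_t buffer[16];
		int status;

		/* One user; the matching pt_section_put() destroys it. */
		section = pt_mk_section(filename, 0ull, sizeof(buffer));
		if (!section)
			return -pte_invalid;

		/* Mappings are use-counted; pair each map with an unmap. */
		status = pt_section_map(section);
		if (status < 0)
			goto out_put;

		status = pt_section_read(section, buffer,
					 (uint16_t) sizeof(buffer), 0ull);
		if (status >= 0)
			printf("read %d bytes from %s\n", status,
			       pt_section_filename(section));

		(void) pt_section_unmap(section);

	out_put:
		(void) pt_section_put(section);
		return status;
	}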
diff --git a/contrib/processor-trace/libipt/internal/include/pt_section_file.h b/contrib/processor-trace/libipt/internal/include/pt_section_file.h
new file mode 100644
index 0000000000000..9b266dbba0f3c
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_section_file.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_FILE_H
+#define PT_SECTION_FILE_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+#if defined(FEATURE_THREADS)
+# include <threads.h>
+#endif /* defined(FEATURE_THREADS) */
+
+struct pt_section;
+
+
+/* File-based section mapping information. */
+struct pt_sec_file_mapping {
+ /* The FILE pointer. */
+ FILE *file;
+
+	/* The begin and end of the section as offsets into @file. */
+ long begin, end;
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting read access to this file.
+ *
+	 * Since we need to first set the file position indicator before
+ * we can read, there's a race on the file position.
+ */
+ mtx_t lock;
+#endif /* defined(FEATURE_THREADS) */
+};
+
+
+/* Map a section based on file operations.
+ *
+ * The caller has already opened the file for reading.
+ *
+ * On success, sets @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @file are NULL.
+ * Returns -pte_invalid if @section can't be mapped.
+ */
+extern int pt_sec_file_map(struct pt_section *section, FILE *file);
+
+/* Unmap a section based on file operations.
+ *
+ * On success, clears @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_file_unmap(struct pt_section *section);
+
+/* Read memory from a file based section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_invalid if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_sec_file_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+/* Compute the memory size of a section based on file operations.
+ *
+ * On success, provides the amount of memory used for mapping @section in bytes
+ * in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @size is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_file_memsize(const struct pt_section *section,
+ uint64_t *size);
+
+#endif /* PT_SECTION_FILE_H */
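
The @lock above exists because a stdio read is a two-step operation: set the
file position, then read. A sketch of the locked read pattern under
FEATURE_THREADS, using only C11 threads and stdio; the actual read path lives
in pt_section_file.c and may differ in detail.

	#include <stdint.h>
	#include <stdio.h>
	#include <threads.h>

	/* Read @size bytes at @offset from a shared FILE under @lock.
	 *
	 * Without the lock, two readers could interleave fseek() and
	 * fread() and end up reading at each other's file position.
	 */
	static long locked_read(FILE *file, mtx_t *lock, uint8_t *buffer,
				uint16_t size, long begin, uint64_t offset)
	{
		size_t nread;

		if (mtx_lock(lock) != thrd_success)
			return -1L;

		if (fseek(file, begin + (long) offset, SEEK_SET) == 0)
			nread = fread(buffer, 1, size, file);
		else
			nread = 0;

		mtx_unlock(lock);
		return (long) nread;
	}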
diff --git a/contrib/processor-trace/libipt/internal/include/pt_sync.h b/contrib/processor-trace/libipt/internal/include/pt_sync.h
new file mode 100644
index 0000000000000..8e0c5d527aa19
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_sync.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SYNC_H
+#define PT_SYNC_H
+
+#include <stdint.h>
+
+struct pt_config;
+
+
+/* Synchronize onto the trace stream.
+ *
+ * Search for the next synchronization point in forward or backward direction
+ * starting at @pos using the trace configuration @config.
+ *
+ * On success, stores a pointer to the next synchronization point in @sync.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_internal if @sync, @pos, or @config is NULL.
+ * Returns -pte_nosync if @pos lies outside of @config's buffer.
+ * Returns -pte_eos if no further synchronization point is found.
+ */
+extern int pt_sync_forward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config);
+extern int pt_sync_backward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config);
+
+/* Manually synchronize onto the trace stream.
+ *
+ * Validate that @pos is within the bounds of @config's trace buffer and that
+ * there is a synchronization point at @pos.
+ *
+ * On success, stores @pos in @sync.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ *
+ * Returns -pte_eos if @pos is outside of @config's trace buffer.
+ * Returns -pte_internal if @sync, @pos, or @config is NULL.
+ * Returns -pte_bad_packet if there is no PSB at @pos.
+ */
+extern int pt_sync_set(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config);
+
+#endif /* PT_SYNC_H */
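
A typical caller iterates pt_sync_forward() to enumerate the synchronization
points in a trace buffer. A minimal sketch, assuming a pt_config whose begin
and end members delimit the buffer as defined in intel-pt.h.

	#include "pt_sync.h"
	#include "intel-pt.h"

	/* Count the synchronization points in @config's trace buffer. */
	static int count_sync_points(const struct pt_config *config)
	{
		const uint8_t *pos, *sync;
		int count;

		pos = config->begin;
		count = 0;

		for (;;) {
			int status;

			status = pt_sync_forward(&sync, pos, config);
			if (status < 0)
				break; /* -pte_eos: no further PSB. */

			count += 1;
			pos = sync + 1; /* A decoder would skip the full PSB. */
		}

		return count;
	}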
diff --git a/contrib/processor-trace/libipt/internal/include/pt_time.h b/contrib/processor-trace/libipt/internal/include/pt_time.h
new file mode 100644
index 0000000000000..1f3816ae163ac
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_time.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_TIME_H
+#define PT_TIME_H
+
+#include <stdint.h>
+
+struct pt_config;
+struct pt_packet_tsc;
+struct pt_packet_cbr;
+struct pt_packet_tma;
+struct pt_packet_mtc;
+struct pt_packet_cyc;
+
+
+/* Intel(R) Processor Trace timing. */
+struct pt_time {
+ /* The estimated Time Stamp Count. */
+ uint64_t tsc;
+
+ /* The base Time Stamp Count (from TSC and MTC). */
+ uint64_t base;
+
+ /* The estimated Fast Counter. */
+ uint64_t fc;
+
+ /* The adjusted last CTC value (from MTC and TMA). */
+ uint32_t ctc;
+
+ /* The adjusted CTC value when @fc was cleared (from MTC and TMA). */
+ uint32_t ctc_cyc;
+
+ /* The number of lost MTC updates. */
+ uint32_t lost_mtc;
+
+ /* The number of lost CYC updates. */
+ uint32_t lost_cyc;
+
+ /* The core:bus ratio. */
+ uint8_t cbr;
+
+ /* A flag saying whether we have seen a TSC packet. */
+ uint32_t have_tsc:1;
+
+ /* A flag saying whether we have seen a CBR packet. */
+ uint32_t have_cbr:1;
+
+ /* A flag saying whether we have seen a TMA packet. */
+ uint32_t have_tma:1;
+
+	/* A flag saying whether we have seen an MTC packet. */
+ uint32_t have_mtc:1;
+};
+
+/* Initialize (or reset) the time. */
+extern void pt_time_init(struct pt_time *time);
+
+/* Query the current time.
+ *
+ * Provides the estimated Time Stamp Count value in @tsc.
+ *
+ * If @lost_mtc is not NULL, provides the number of lost MTC packets.
+ * If @lost_cyc is not NULL, provides the number of lost CYC packets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @tsc or @time is NULL.
+ * Returns -pte_no_time if there has not been a TSC packet.
+ */
+extern int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc,
+ uint32_t *lost_cyc, const struct pt_time *time);
+
+/* Query the current core:bus ratio.
+ *
+ * Provides the core:bus ratio in @cbr.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @cbr or @time is NULL.
+ * Returns -pte_no_cbr if there has not been a CBR packet.
+ */
+extern int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time);
+
+/* Update the time based on an Intel PT packet.
+ *
+ * Returns zero on success.
+ * Returns a negative error code otherwise.
+ */
+extern int pt_time_update_tsc(struct pt_time *, const struct pt_packet_tsc *,
+ const struct pt_config *);
+extern int pt_time_update_cbr(struct pt_time *, const struct pt_packet_cbr *,
+ const struct pt_config *);
+extern int pt_time_update_tma(struct pt_time *, const struct pt_packet_tma *,
+ const struct pt_config *);
+extern int pt_time_update_mtc(struct pt_time *, const struct pt_packet_mtc *,
+ const struct pt_config *);
+/* @fcr is the fast-counter:cycles ratio obtained by calibration. */
+extern int pt_time_update_cyc(struct pt_time *, const struct pt_packet_cyc *,
+ const struct pt_config *, uint64_t fcr);
+
+
+/* Timing calibration.
+ *
+ * Used for estimating the Fast-Counter:Cycles ratio.
+ *
+ * Ideally, we calibrate by counting CYCs between MTCs. Lacking MTCs, we
+ * use TSC instead.
+ */
+struct pt_time_cal {
+ /* The estimated fast-counter:cycles ratio. */
+ uint64_t fcr;
+
+ /* The minimal and maximal @fcr values. */
+ uint64_t min_fcr, max_fcr;
+
+ /* The last TSC value.
+ *
+ * Used for calibrating at TSC.
+ */
+ uint64_t tsc;
+
+ /* The number of cycles since the last TSC (from CYC).
+ *
+ * Used for calibrating at TSC.
+ */
+ uint64_t cyc_tsc;
+
+ /* The number of cycles since the last MTC (from CYC).
+ *
+ * Used for calibrating at MTC.
+ */
+ uint64_t cyc_mtc;
+
+ /* The adjusted last CTC value (from MTC).
+ *
+ * Used for calibrating at MTC.
+ */
+ uint32_t ctc;
+
+ /* The number of lost MTC updates since the last successful update. */
+ uint32_t lost_mtc;
+
+	/* A flag saying whether we have seen an MTC packet. */
+ uint32_t have_mtc:1;
+};
+
+enum {
+	/* The number of fractional bits in the fixed-point fcr value,
+	 * i.e. the amount by which it is right-shifted to obtain the
+	 * integer part of the ratio.
+	 *
+	 * Do not shift the value obtained by pt_tcal_fcr() when passing it to
+	 * pt_time_update_cyc(); it is already in fixed-point format.
+	 * Do shift the value passed to pt_tcal_set_fcr() accordingly.
+	 */
+ pt_tcal_fcr_shr = 8
+};
+
+/* Initialize or reset timing calibration. */
+extern void pt_tcal_init(struct pt_time_cal *tcal);
+
+/* Query the estimated fast-counter:cycles ratio.
+ *
+ * On success, provides the estimated ratio in @fcr.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @fcr or @tcal is NULL.
+ * Returns -pte_no_time if no information is available.
+ */
+extern int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal);
+
+/* Set the fast-counter:cycles ratio.
+ *
+ * Timing calibration takes one CBR or two MTC packets before it can provide
+ * its first estimate. Use this to supply an initial value for use in the
+ * meantime.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @cal is NULL.
+ */
+extern int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr);
+
+/* Update calibration based on an Intel PT packet.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+extern int pt_tcal_update_tsc(struct pt_time_cal *,
+ const struct pt_packet_tsc *,
+ const struct pt_config *);
+extern int pt_tcal_header_tsc(struct pt_time_cal *,
+ const struct pt_packet_tsc *,
+ const struct pt_config *);
+extern int pt_tcal_update_cbr(struct pt_time_cal *,
+ const struct pt_packet_cbr *,
+ const struct pt_config *);
+extern int pt_tcal_header_cbr(struct pt_time_cal *,
+ const struct pt_packet_cbr *,
+ const struct pt_config *);
+extern int pt_tcal_update_tma(struct pt_time_cal *,
+ const struct pt_packet_tma *,
+ const struct pt_config *);
+extern int pt_tcal_update_mtc(struct pt_time_cal *,
+ const struct pt_packet_mtc *,
+ const struct pt_config *);
+extern int pt_tcal_update_cyc(struct pt_time_cal *,
+ const struct pt_packet_cyc *,
+ const struct pt_config *);
+
+#endif /* PT_TIME_H */
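
A short sketch of the query side and of the fixed-point convention behind
pt_tcal_fcr_shr. The seed ratio of two fast-counter ticks per cycle is an
arbitrary example value.

	#include "pt_time.h"
	#include "intel-pt.h"

	#include <inttypes.h>
	#include <stdio.h>

	/* Report the current estimated TSC, if any. */
	static void report_time(const struct pt_time *time)
	{
		uint64_t tsc;
		uint32_t lost_mtc, lost_cyc;
		int status;

		status = pt_time_query_tsc(&tsc, &lost_mtc, &lost_cyc, time);
		if (status == -pte_no_time) {
			printf("no timing information, yet\n");
			return;
		}
		if (status < 0)
			return;

		printf("tsc: 0x%" PRIx64 " (lost %" PRIu32 " mtc, %" PRIu32
		       " cyc)\n", tsc, lost_mtc, lost_cyc);
	}

	/* Seed calibration with an initial ratio, shifted into the
	 * fixed-point format that pt_tcal_set_fcr() expects.
	 */
	static void seed_calibration(struct pt_time_cal *tcal)
	{
		pt_tcal_init(tcal);
		(void) pt_tcal_set_fcr(tcal, 2ull << pt_tcal_fcr_shr);
	}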
diff --git a/contrib/processor-trace/libipt/internal/include/pt_tnt_cache.h b/contrib/processor-trace/libipt/internal/include/pt_tnt_cache.h
new file mode 100644
index 0000000000000..67d2b3798de13
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pt_tnt_cache.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_TNT_CACHE_H
+#define PT_TNT_CACHE_H
+
+#include <stdint.h>
+
+struct pt_packet_tnt;
+struct pt_config;
+
+
+/* Keeping track of tnt indicators. */
+struct pt_tnt_cache {
+ /* The last tnt. */
+ uint64_t tnt;
+
+ /* The index into the above tnt.
+ *
+ * (tnt & index) gives the current tnt entry.
+ * (index >>= 1) moves the index to the next tnt entry.
+ * (index == 0) means that the current tnt is empty.
+ */
+ uint64_t index;
+};
+
+
+/* Initialize (or reset) the tnt cache. */
+extern void pt_tnt_cache_init(struct pt_tnt_cache *cache);
+
+/* Check if the tnt cache is empty.
+ *
+ * Returns 0 if the tnt cache is not empty.
+ * Returns > 0 if the tnt cache is empty.
+ * Returns -pte_invalid if @cache is NULL.
+ */
+extern int pt_tnt_cache_is_empty(const struct pt_tnt_cache *cache);
+
+/* Query the next tnt indicator.
+ *
+ * This consumes the returned tnt indicator from the cache.
+ *
+ * Returns 0 if the next branch is not taken.
+ * Returns > 0 if the next branch is taken.
+ * Returns -pte_invalid if @cache is NULL.
+ * Returns -pte_bad_query if there is no tnt cached.
+ */
+extern int pt_tnt_cache_query(struct pt_tnt_cache *cache);
+
+/* Update the tnt cache based on Intel PT packets.
+ *
+ * Updates @cache based on @packet and, if non-null, @config.
+ *
+ * Returns zero on success.
+ * Returns -pte_invalid if @cache or @packet is NULL.
+ * Returns -pte_bad_packet if @packet appears to be corrupted.
+ * Returns -pte_bad_context if the tnt cache is not empty.
+ */
+extern int pt_tnt_cache_update_tnt(struct pt_tnt_cache *cache,
+ const struct pt_packet_tnt *packet,
+ const struct pt_config *config);
+
+#endif /* PT_TNT_CACHE_H */
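
A minimal sketch of draining cached TNT bits via the documented query
interface; it assumes @cache was previously filled by
pt_tnt_cache_update_tnt().

	#include "pt_tnt_cache.h"
	#include "intel-pt.h"

	#include <stdio.h>

	/* Consume all cached taken/not-taken decisions.
	 *
	 * Each query consumes one bit: (tnt & index) is the current
	 * decision and the index shifts right toward zero (empty).
	 */
	static void drain_tnt(struct pt_tnt_cache *cache)
	{
		for (;;) {
			int taken;

			taken = pt_tnt_cache_query(cache);
			if (taken < 0)
				break; /* -pte_bad_query: nothing cached. */

			printf("branch %s\n", taken ? "taken" : "not taken");
		}
	}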
diff --git a/contrib/processor-trace/libipt/internal/include/pti-disp-defs.h b/contrib/processor-trace/libipt/internal/include/pti-disp-defs.h
new file mode 100644
index 0000000000000..82a6d04245b78
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pti-disp-defs.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PTI_DISP_DEFS_H)
+#define PTI_DISP_DEFS_H
+
+#define PTI_DISP_NONE 0
+#define PTI_PRESERVE_DEFAULT 1
+#define PTI_BRDISP8 2
+#define PTI_DISP_BUCKET_0_l1 3
+#define PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2 4
+#define PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2 5
+#define PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1 6
+#endif
diff --git a/contrib/processor-trace/libipt/internal/include/pti-disp.h b/contrib/processor-trace/libipt/internal/include/pti-disp.h
new file mode 100644
index 0000000000000..99e97a3342304
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pti-disp.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static uint8_t disp_bytes_map_0x0[256] = {
+/*opcode 0x0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x11*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x12*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x13*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x14*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x15*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x16*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x17*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x18*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x19*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x20*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x21*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x22*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x23*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x24*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x25*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x28*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x29*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2e*/ 0,
+/*opcode 0x2f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x30*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x31*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x32*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x33*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x34*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x35*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x38*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x39*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x40*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x41*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x42*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x43*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x44*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x45*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x46*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x47*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x48*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x49*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x50*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x51*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x52*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x53*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x54*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x55*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x56*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x57*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x58*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x59*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x60*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x61*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x62*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x63*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x64*/ 0,
+/*opcode 0x65*/ 0,
+/*opcode 0x66*/ 0,
+/*opcode 0x67*/ 0,
+/*opcode 0x68*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x69*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x70*/ PTI_BRDISP8,
+/*opcode 0x71*/ PTI_BRDISP8,
+/*opcode 0x72*/ PTI_BRDISP8,
+/*opcode 0x73*/ PTI_BRDISP8,
+/*opcode 0x74*/ PTI_BRDISP8,
+/*opcode 0x75*/ PTI_BRDISP8,
+/*opcode 0x76*/ PTI_BRDISP8,
+/*opcode 0x77*/ PTI_BRDISP8,
+/*opcode 0x78*/ PTI_BRDISP8,
+/*opcode 0x79*/ PTI_BRDISP8,
+/*opcode 0x7a*/ PTI_BRDISP8,
+/*opcode 0x7b*/ PTI_BRDISP8,
+/*opcode 0x7c*/ PTI_BRDISP8,
+/*opcode 0x7d*/ PTI_BRDISP8,
+/*opcode 0x7e*/ PTI_BRDISP8,
+/*opcode 0x7f*/ PTI_BRDISP8,
+/*opcode 0x80*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x81*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x82*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x83*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x84*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x85*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x86*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x87*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x88*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x89*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x90*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x91*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x92*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x93*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x94*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x95*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x96*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x97*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x98*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x99*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9a*/ PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x9b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa0*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa1*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa2*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa3*/ PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2,
+/*opcode 0xa4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xab*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xac*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xad*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xae*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xba*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc7*/ PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1,
+/*opcode 0xc8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xca*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xce*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xda*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xde*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe0*/ PTI_BRDISP8,
+/*opcode 0xe1*/ PTI_BRDISP8,
+/*opcode 0xe2*/ PTI_BRDISP8,
+/*opcode 0xe3*/ PTI_BRDISP8,
+/*opcode 0xe4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe8*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0xe9*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0xea*/ PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xeb*/ PTI_BRDISP8,
+/*opcode 0xec*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xed*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xee*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xef*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf0*/ 0,
+/*opcode 0xf1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf2*/ 0,
+/*opcode 0xf3*/ 0,
+/*opcode 0xf4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xff*/ PTI_PRESERVE_DEFAULT,
+};
+static uint8_t disp_bytes_map_0x0F[256] = {
+/*opcode 0x0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4*/ 0,
+/*opcode 0x5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa*/ 0,
+/*opcode 0xb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc*/ 0,
+/*opcode 0xd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x11*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x12*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x13*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x14*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x15*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x16*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x17*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x18*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x19*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x1f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x20*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x21*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x22*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x23*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x24*/ 0,
+/*opcode 0x25*/ 0,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ 0,
+/*opcode 0x28*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x29*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x2f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x30*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x31*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x32*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x33*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x34*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x35*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x38*/ 0,
+/*opcode 0x39*/ 0,
+/*opcode 0x3a*/ 0,
+/*opcode 0x3b*/ 0,
+/*opcode 0x3c*/ 0,
+/*opcode 0x3d*/ 0,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ 0,
+/*opcode 0x40*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x41*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x42*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x43*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x44*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x45*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x46*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x47*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x48*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x49*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x4f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x50*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x51*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x52*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x53*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x54*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x55*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x56*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x57*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x58*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x59*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x5f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x60*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x61*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x62*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x63*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x64*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x65*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x66*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x67*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x68*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x69*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x6f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x70*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x71*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x72*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x73*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x74*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x75*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x76*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x77*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x78*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x79*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x7f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x80*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x81*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x82*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x83*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x84*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x85*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x86*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x87*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x88*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x89*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8a*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8b*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8c*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8d*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8e*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x8f*/ PTI_DISP_BUCKET_0_l1,
+/*opcode 0x90*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x91*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x92*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x93*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x94*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x95*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x96*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x97*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x98*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x99*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9a*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9b*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9c*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9d*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9e*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0x9f*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa6*/ 0,
+/*opcode 0xa7*/ 0,
+/*opcode 0xa8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xa9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xab*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xac*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xad*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xae*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xaf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xb9*/ 0,
+/*opcode 0xba*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xbf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xc9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xca*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xce*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xcf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xd9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xda*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xde*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xdf*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xe9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xea*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xeb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xec*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xed*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xee*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xef*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf0*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf1*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf2*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf3*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf4*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf5*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf6*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf7*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf8*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xf9*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfa*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfb*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfc*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfd*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xfe*/ PTI_PRESERVE_DEFAULT,
+/*opcode 0xff*/ 0,
+};
diff --git a/contrib/processor-trace/libipt/internal/include/pti-imm-defs.h b/contrib/processor-trace/libipt/internal/include/pti-imm-defs.h
new file mode 100644
index 0000000000000..598c335638b81
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pti-imm-defs.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PTI_IMM_DEFS_H)
+#define PTI_IMM_DEFS_H
+
+#define PTI_IMM_NONE 0
+#define PTI_0_IMM_WIDTH_CONST_l2 1
+#define PTI_UIMM8_IMM_WIDTH_CONST_l2 2
+#define PTI_SIMM8_IMM_WIDTH_CONST_l2 3
+#define PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2 4
+#define PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2 5
+#define PTI_UIMM16_IMM_WIDTH_CONST_l2 6
+#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1 7
+#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1 8
+#define PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2 9
+#define PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1 10
+#define PTI_IMM_hasimm_map0x0_op0xc8_l1 11
+#define PTI_IMM_hasimm_map0x0F_op0x78_l1 12
+
+#endif
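
These immediate-width classes are consumed by the instruction-length decoder (pt_ild.c includes this header). A minimal sketch of how the constant-width classes could map to byte counts, assuming the effective operand size (eosz, in bytes: 2, 4, or 8) was already derived from prefixes and mode; the helper name is hypothetical and not part of the import. The RESOLVE_BYREG and hasimm classes also need the ModRM byte and are left unresolved here.

#include <stdint.h>

/* Map a constant-width immediate class to its byte count; return -1
 * for classes that need more context (ModRM reg field, ENTER's extra
 * byte, etc.).  Hypothetical helper for illustration only.
 */
static int pti_imm_bytes_sketch(uint8_t imm_class, int eosz)
{
	switch (imm_class) {
	case PTI_0_IMM_WIDTH_CONST_l2:
		return 0;			/* no immediate */
	case PTI_UIMM8_IMM_WIDTH_CONST_l2:
	case PTI_SIMM8_IMM_WIDTH_CONST_l2:
		return 1;			/* fixed 8-bit immediate */
	case PTI_UIMM16_IMM_WIDTH_CONST_l2:
		return 2;			/* fixed 16-bit immediate */
	case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
	case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2:
		return (eosz == 2) ? 2 : 4;	/* z: 16 or 32 bits */
	case PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
		return eosz;			/* v: full operand size */
	default:
		return -1;			/* needs ModRM or other context */
	}
}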
diff --git a/contrib/processor-trace/libipt/internal/include/pti-imm.h b/contrib/processor-trace/libipt/internal/include/pti-imm.h
new file mode 100644
index 0000000000000..7f3ceab2b3076
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pti-imm.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static uint8_t imm_bytes_map_0x0[256] = {
+/*opcode 0x0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x5*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xd*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x11*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x12*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x13*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x14*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x15*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x16*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x17*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x18*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x19*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x1d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x1e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x20*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x21*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x22*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x23*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x24*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x25*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x28*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x29*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x2d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x2e*/ 0,
+/*opcode 0x2f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x30*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x31*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x32*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x33*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x34*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x35*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x38*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x39*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3c*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x3d*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x40*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x41*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x42*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x43*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x44*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x45*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x46*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x47*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x48*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x49*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x50*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x51*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x52*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x53*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x54*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x55*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x56*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x57*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x58*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x59*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x60*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x61*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x62*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x63*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x64*/ 0,
+/*opcode 0x65*/ 0,
+/*opcode 0x66*/ 0,
+/*opcode 0x67*/ 0,
+/*opcode 0x68*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2,
+/*opcode 0x69*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x6a*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x6b*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x6c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x70*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x71*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x72*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x73*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x74*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x75*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x76*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x77*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x78*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x79*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x80*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x81*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0x82*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x83*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x84*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x85*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x86*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x87*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x88*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x89*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x90*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x91*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x92*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x93*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x94*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x95*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x96*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x97*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x98*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x99*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9a*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0x9b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa8*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xa9*/ PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xaa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xab*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xac*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xad*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xae*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xaf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb0*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb1*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb2*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb3*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb7*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xb8*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xb9*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xba*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbb*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbc*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbd*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbe*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xbf*/ PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2,
+/*opcode 0xc0*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc1*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc2*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0xc3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc7*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1,
+/*opcode 0xc8*/ PTI_IMM_hasimm_map0x0_op0xc8_l1,
+/*opcode 0xc9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xca*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0xcb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcd*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xce*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd4*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xd5*/ PTI_SIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xd6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xda*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xde*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe7*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xe8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xea*/ PTI_UIMM16_IMM_WIDTH_CONST_l2,
+/*opcode 0xeb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xec*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xed*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xee*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xef*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf0*/ 0,
+/*opcode 0xf1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf2*/ 0,
+/*opcode 0xf3*/ 0,
+/*opcode 0xf4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf6*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1,
+/*opcode 0xf7*/ PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1,
+/*opcode 0xf8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xff*/ PTI_0_IMM_WIDTH_CONST_l2,
+};
+static uint8_t imm_bytes_map_0x0F[256] = {
+/*opcode 0x0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4*/ 0,
+/*opcode 0x5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa*/ 0,
+/*opcode 0xb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc*/ 0,
+/*opcode 0xd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf*/ 0,
+/*opcode 0x10*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x11*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x12*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x13*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x14*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x15*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x16*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x17*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x18*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x19*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x1f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x20*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x21*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x22*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x23*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x24*/ 0,
+/*opcode 0x25*/ 0,
+/*opcode 0x26*/ 0,
+/*opcode 0x27*/ 0,
+/*opcode 0x28*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x29*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x2f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x30*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x31*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x32*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x33*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x34*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x35*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x36*/ 0,
+/*opcode 0x37*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x38*/ 0,
+/*opcode 0x39*/ 0,
+/*opcode 0x3a*/ 0,
+/*opcode 0x3b*/ 0,
+/*opcode 0x3c*/ 0,
+/*opcode 0x3d*/ 0,
+/*opcode 0x3e*/ 0,
+/*opcode 0x3f*/ 0,
+/*opcode 0x40*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x41*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x42*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x43*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x44*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x45*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x46*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x47*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x48*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x49*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x4f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x50*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x51*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x52*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x53*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x54*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x55*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x56*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x57*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x58*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x59*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x5f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x60*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x61*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x62*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x63*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x64*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x65*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x66*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x67*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x68*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x69*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x6f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x70*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x71*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x72*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x73*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0x74*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x75*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x76*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x77*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x78*/ PTI_IMM_hasimm_map0x0F_op0x78_l1,
+/*opcode 0x79*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x7f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x80*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x81*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x82*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x83*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x84*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x85*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x86*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x87*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x88*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x89*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x8f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x90*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x91*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x92*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x93*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x94*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x95*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x96*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x97*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x98*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x99*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9a*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9b*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9c*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9d*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9e*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0x9f*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xa5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa6*/ 0,
+/*opcode 0xa7*/ 0,
+/*opcode 0xa8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xa9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xaa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xab*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xac*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xad*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xae*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xaf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xb9*/ 0,
+/*opcode 0xba*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xbb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xbf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc2*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc4*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc5*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc6*/ PTI_UIMM8_IMM_WIDTH_CONST_l2,
+/*opcode 0xc7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xc9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xca*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xce*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xcf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xd9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xda*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xde*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xdf*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xe9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xea*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xeb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xec*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xed*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xee*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xef*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf0*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf1*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf2*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf3*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf4*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf5*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf6*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf7*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf8*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xf9*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfa*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfb*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfc*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfd*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xfe*/ PTI_0_IMM_WIDTH_CONST_l2,
+/*opcode 0xff*/ 0,
+};
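
The two tables are consumed pairwise: the opcode escape selects the table and the opcode byte indexes it. A minimal lookup sketch, assuming both tables are in scope (the function name is hypothetical); zero entries mark prefix, escape, and undefined slots that the decoder resolves elsewhere.

static uint8_t pti_imm_class_sketch(int map_0x0F, uint8_t opcode)
{
	/* One-byte opcodes use map 0x0; 0x0F-escaped opcodes use
	 * map 0x0F.  The returned class feeds a width resolution
	 * like the one sketched after pti-imm-defs.h above.
	 */
	return map_0x0F ? imm_bytes_map_0x0F[opcode]
			: imm_bytes_map_0x0[opcode];
}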
diff --git a/contrib/processor-trace/libipt/internal/include/pti-modrm-defs.h b/contrib/processor-trace/libipt/internal/include/pti-modrm-defs.h
new file mode 100644
index 0000000000000..fd75618ce172a
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pti-modrm-defs.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if !defined(PTI_MODRM_DEFS_H)
+#define PTI_MODRM_DEFS_H
+
+
+#define PTI_MODRM_FALSE 0
+#define PTI_MODRM_TRUE 1
+#define PTI_MODRM_IGNORE_MOD 2
+#define PTI_MODRM_UNDEF 3
+
+#endif
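
A hedged reading of the four states, written as a hypothetical predicate (the tables that use these values follow in pti-modrm.h):

static int pti_modrm_state_sketch(uint8_t state)
{
	switch (state) {
	case PTI_MODRM_TRUE:
		return 1;	/* a ModRM byte always follows */
	case PTI_MODRM_IGNORE_MOD:
		return 1;	/* ModRM present, but the mod bits do not
				 * select a memory operand (e.g. the
				 * 0x0F 0x20-0x23 moves to/from control
				 * and debug registers) */
	case PTI_MODRM_FALSE:
		return 0;	/* no ModRM byte */
	case PTI_MODRM_UNDEF:
	default:
		return -1;	/* prefix, escape, or undefined opcode;
				 * handled by other tables */
	}
}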
diff --git a/contrib/processor-trace/libipt/internal/include/pti-modrm.h b/contrib/processor-trace/libipt/internal/include/pti-modrm.h
new file mode 100644
index 0000000000000..ddddf63d29db4
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/pti-modrm.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+static uint8_t has_modrm_map_0x0[256] = {
+/*opcode 0x0*/ PTI_MODRM_TRUE,
+/*opcode 0x1*/ PTI_MODRM_TRUE,
+/*opcode 0x2*/ PTI_MODRM_TRUE,
+/*opcode 0x3*/ PTI_MODRM_TRUE,
+/*opcode 0x4*/ PTI_MODRM_FALSE,
+/*opcode 0x5*/ PTI_MODRM_FALSE,
+/*opcode 0x6*/ PTI_MODRM_FALSE,
+/*opcode 0x7*/ PTI_MODRM_FALSE,
+/*opcode 0x8*/ PTI_MODRM_TRUE,
+/*opcode 0x9*/ PTI_MODRM_TRUE,
+/*opcode 0xa*/ PTI_MODRM_TRUE,
+/*opcode 0xb*/ PTI_MODRM_TRUE,
+/*opcode 0xc*/ PTI_MODRM_FALSE,
+/*opcode 0xd*/ PTI_MODRM_FALSE,
+/*opcode 0xe*/ PTI_MODRM_FALSE,
+/*opcode 0xf*/ PTI_MODRM_UNDEF,
+/*opcode 0x10*/ PTI_MODRM_TRUE,
+/*opcode 0x11*/ PTI_MODRM_TRUE,
+/*opcode 0x12*/ PTI_MODRM_TRUE,
+/*opcode 0x13*/ PTI_MODRM_TRUE,
+/*opcode 0x14*/ PTI_MODRM_FALSE,
+/*opcode 0x15*/ PTI_MODRM_FALSE,
+/*opcode 0x16*/ PTI_MODRM_FALSE,
+/*opcode 0x17*/ PTI_MODRM_FALSE,
+/*opcode 0x18*/ PTI_MODRM_TRUE,
+/*opcode 0x19*/ PTI_MODRM_TRUE,
+/*opcode 0x1a*/ PTI_MODRM_TRUE,
+/*opcode 0x1b*/ PTI_MODRM_TRUE,
+/*opcode 0x1c*/ PTI_MODRM_FALSE,
+/*opcode 0x1d*/ PTI_MODRM_FALSE,
+/*opcode 0x1e*/ PTI_MODRM_FALSE,
+/*opcode 0x1f*/ PTI_MODRM_FALSE,
+/*opcode 0x20*/ PTI_MODRM_TRUE,
+/*opcode 0x21*/ PTI_MODRM_TRUE,
+/*opcode 0x22*/ PTI_MODRM_TRUE,
+/*opcode 0x23*/ PTI_MODRM_TRUE,
+/*opcode 0x24*/ PTI_MODRM_FALSE,
+/*opcode 0x25*/ PTI_MODRM_FALSE,
+/*opcode 0x26*/ PTI_MODRM_UNDEF,
+/*opcode 0x27*/ PTI_MODRM_FALSE,
+/*opcode 0x28*/ PTI_MODRM_TRUE,
+/*opcode 0x29*/ PTI_MODRM_TRUE,
+/*opcode 0x2a*/ PTI_MODRM_TRUE,
+/*opcode 0x2b*/ PTI_MODRM_TRUE,
+/*opcode 0x2c*/ PTI_MODRM_FALSE,
+/*opcode 0x2d*/ PTI_MODRM_FALSE,
+/*opcode 0x2e*/ PTI_MODRM_UNDEF,
+/*opcode 0x2f*/ PTI_MODRM_FALSE,
+/*opcode 0x30*/ PTI_MODRM_TRUE,
+/*opcode 0x31*/ PTI_MODRM_TRUE,
+/*opcode 0x32*/ PTI_MODRM_TRUE,
+/*opcode 0x33*/ PTI_MODRM_TRUE,
+/*opcode 0x34*/ PTI_MODRM_FALSE,
+/*opcode 0x35*/ PTI_MODRM_FALSE,
+/*opcode 0x36*/ PTI_MODRM_UNDEF,
+/*opcode 0x37*/ PTI_MODRM_FALSE,
+/*opcode 0x38*/ PTI_MODRM_TRUE,
+/*opcode 0x39*/ PTI_MODRM_TRUE,
+/*opcode 0x3a*/ PTI_MODRM_TRUE,
+/*opcode 0x3b*/ PTI_MODRM_TRUE,
+/*opcode 0x3c*/ PTI_MODRM_FALSE,
+/*opcode 0x3d*/ PTI_MODRM_FALSE,
+/*opcode 0x3e*/ PTI_MODRM_UNDEF,
+/*opcode 0x3f*/ PTI_MODRM_FALSE,
+/*opcode 0x40*/ PTI_MODRM_FALSE,
+/*opcode 0x41*/ PTI_MODRM_FALSE,
+/*opcode 0x42*/ PTI_MODRM_FALSE,
+/*opcode 0x43*/ PTI_MODRM_FALSE,
+/*opcode 0x44*/ PTI_MODRM_FALSE,
+/*opcode 0x45*/ PTI_MODRM_FALSE,
+/*opcode 0x46*/ PTI_MODRM_FALSE,
+/*opcode 0x47*/ PTI_MODRM_FALSE,
+/*opcode 0x48*/ PTI_MODRM_FALSE,
+/*opcode 0x49*/ PTI_MODRM_FALSE,
+/*opcode 0x4a*/ PTI_MODRM_FALSE,
+/*opcode 0x4b*/ PTI_MODRM_FALSE,
+/*opcode 0x4c*/ PTI_MODRM_FALSE,
+/*opcode 0x4d*/ PTI_MODRM_FALSE,
+/*opcode 0x4e*/ PTI_MODRM_FALSE,
+/*opcode 0x4f*/ PTI_MODRM_FALSE,
+/*opcode 0x50*/ PTI_MODRM_FALSE,
+/*opcode 0x51*/ PTI_MODRM_FALSE,
+/*opcode 0x52*/ PTI_MODRM_FALSE,
+/*opcode 0x53*/ PTI_MODRM_FALSE,
+/*opcode 0x54*/ PTI_MODRM_FALSE,
+/*opcode 0x55*/ PTI_MODRM_FALSE,
+/*opcode 0x56*/ PTI_MODRM_FALSE,
+/*opcode 0x57*/ PTI_MODRM_FALSE,
+/*opcode 0x58*/ PTI_MODRM_FALSE,
+/*opcode 0x59*/ PTI_MODRM_FALSE,
+/*opcode 0x5a*/ PTI_MODRM_FALSE,
+/*opcode 0x5b*/ PTI_MODRM_FALSE,
+/*opcode 0x5c*/ PTI_MODRM_FALSE,
+/*opcode 0x5d*/ PTI_MODRM_FALSE,
+/*opcode 0x5e*/ PTI_MODRM_FALSE,
+/*opcode 0x5f*/ PTI_MODRM_FALSE,
+/*opcode 0x60*/ PTI_MODRM_FALSE,
+/*opcode 0x61*/ PTI_MODRM_FALSE,
+/*opcode 0x62*/ PTI_MODRM_TRUE,
+/*opcode 0x63*/ PTI_MODRM_TRUE,
+/*opcode 0x64*/ PTI_MODRM_UNDEF,
+/*opcode 0x65*/ PTI_MODRM_UNDEF,
+/*opcode 0x66*/ PTI_MODRM_UNDEF,
+/*opcode 0x67*/ PTI_MODRM_UNDEF,
+/*opcode 0x68*/ PTI_MODRM_FALSE,
+/*opcode 0x69*/ PTI_MODRM_TRUE,
+/*opcode 0x6a*/ PTI_MODRM_FALSE,
+/*opcode 0x6b*/ PTI_MODRM_TRUE,
+/*opcode 0x6c*/ PTI_MODRM_FALSE,
+/*opcode 0x6d*/ PTI_MODRM_FALSE,
+/*opcode 0x6e*/ PTI_MODRM_FALSE,
+/*opcode 0x6f*/ PTI_MODRM_FALSE,
+/*opcode 0x70*/ PTI_MODRM_FALSE,
+/*opcode 0x71*/ PTI_MODRM_FALSE,
+/*opcode 0x72*/ PTI_MODRM_FALSE,
+/*opcode 0x73*/ PTI_MODRM_FALSE,
+/*opcode 0x74*/ PTI_MODRM_FALSE,
+/*opcode 0x75*/ PTI_MODRM_FALSE,
+/*opcode 0x76*/ PTI_MODRM_FALSE,
+/*opcode 0x77*/ PTI_MODRM_FALSE,
+/*opcode 0x78*/ PTI_MODRM_FALSE,
+/*opcode 0x79*/ PTI_MODRM_FALSE,
+/*opcode 0x7a*/ PTI_MODRM_FALSE,
+/*opcode 0x7b*/ PTI_MODRM_FALSE,
+/*opcode 0x7c*/ PTI_MODRM_FALSE,
+/*opcode 0x7d*/ PTI_MODRM_FALSE,
+/*opcode 0x7e*/ PTI_MODRM_FALSE,
+/*opcode 0x7f*/ PTI_MODRM_FALSE,
+/*opcode 0x80*/ PTI_MODRM_TRUE,
+/*opcode 0x81*/ PTI_MODRM_TRUE,
+/*opcode 0x82*/ PTI_MODRM_TRUE,
+/*opcode 0x83*/ PTI_MODRM_TRUE,
+/*opcode 0x84*/ PTI_MODRM_TRUE,
+/*opcode 0x85*/ PTI_MODRM_TRUE,
+/*opcode 0x86*/ PTI_MODRM_TRUE,
+/*opcode 0x87*/ PTI_MODRM_TRUE,
+/*opcode 0x88*/ PTI_MODRM_TRUE,
+/*opcode 0x89*/ PTI_MODRM_TRUE,
+/*opcode 0x8a*/ PTI_MODRM_TRUE,
+/*opcode 0x8b*/ PTI_MODRM_TRUE,
+/*opcode 0x8c*/ PTI_MODRM_TRUE,
+/*opcode 0x8d*/ PTI_MODRM_TRUE,
+/*opcode 0x8e*/ PTI_MODRM_TRUE,
+/*opcode 0x8f*/ PTI_MODRM_TRUE,
+/*opcode 0x90*/ PTI_MODRM_FALSE,
+/*opcode 0x91*/ PTI_MODRM_FALSE,
+/*opcode 0x92*/ PTI_MODRM_FALSE,
+/*opcode 0x93*/ PTI_MODRM_FALSE,
+/*opcode 0x94*/ PTI_MODRM_FALSE,
+/*opcode 0x95*/ PTI_MODRM_FALSE,
+/*opcode 0x96*/ PTI_MODRM_FALSE,
+/*opcode 0x97*/ PTI_MODRM_FALSE,
+/*opcode 0x98*/ PTI_MODRM_FALSE,
+/*opcode 0x99*/ PTI_MODRM_FALSE,
+/*opcode 0x9a*/ PTI_MODRM_FALSE,
+/*opcode 0x9b*/ PTI_MODRM_FALSE,
+/*opcode 0x9c*/ PTI_MODRM_FALSE,
+/*opcode 0x9d*/ PTI_MODRM_FALSE,
+/*opcode 0x9e*/ PTI_MODRM_FALSE,
+/*opcode 0x9f*/ PTI_MODRM_FALSE,
+/*opcode 0xa0*/ PTI_MODRM_FALSE,
+/*opcode 0xa1*/ PTI_MODRM_FALSE,
+/*opcode 0xa2*/ PTI_MODRM_FALSE,
+/*opcode 0xa3*/ PTI_MODRM_FALSE,
+/*opcode 0xa4*/ PTI_MODRM_FALSE,
+/*opcode 0xa5*/ PTI_MODRM_FALSE,
+/*opcode 0xa6*/ PTI_MODRM_FALSE,
+/*opcode 0xa7*/ PTI_MODRM_FALSE,
+/*opcode 0xa8*/ PTI_MODRM_FALSE,
+/*opcode 0xa9*/ PTI_MODRM_FALSE,
+/*opcode 0xaa*/ PTI_MODRM_FALSE,
+/*opcode 0xab*/ PTI_MODRM_FALSE,
+/*opcode 0xac*/ PTI_MODRM_FALSE,
+/*opcode 0xad*/ PTI_MODRM_FALSE,
+/*opcode 0xae*/ PTI_MODRM_FALSE,
+/*opcode 0xaf*/ PTI_MODRM_FALSE,
+/*opcode 0xb0*/ PTI_MODRM_FALSE,
+/*opcode 0xb1*/ PTI_MODRM_FALSE,
+/*opcode 0xb2*/ PTI_MODRM_FALSE,
+/*opcode 0xb3*/ PTI_MODRM_FALSE,
+/*opcode 0xb4*/ PTI_MODRM_FALSE,
+/*opcode 0xb5*/ PTI_MODRM_FALSE,
+/*opcode 0xb6*/ PTI_MODRM_FALSE,
+/*opcode 0xb7*/ PTI_MODRM_FALSE,
+/*opcode 0xb8*/ PTI_MODRM_FALSE,
+/*opcode 0xb9*/ PTI_MODRM_FALSE,
+/*opcode 0xba*/ PTI_MODRM_FALSE,
+/*opcode 0xbb*/ PTI_MODRM_FALSE,
+/*opcode 0xbc*/ PTI_MODRM_FALSE,
+/*opcode 0xbd*/ PTI_MODRM_FALSE,
+/*opcode 0xbe*/ PTI_MODRM_FALSE,
+/*opcode 0xbf*/ PTI_MODRM_FALSE,
+/*opcode 0xc0*/ PTI_MODRM_TRUE,
+/*opcode 0xc1*/ PTI_MODRM_TRUE,
+/*opcode 0xc2*/ PTI_MODRM_FALSE,
+/*opcode 0xc3*/ PTI_MODRM_FALSE,
+/*opcode 0xc4*/ PTI_MODRM_TRUE,
+/*opcode 0xc5*/ PTI_MODRM_TRUE,
+/*opcode 0xc6*/ PTI_MODRM_TRUE,
+/*opcode 0xc7*/ PTI_MODRM_TRUE,
+/*opcode 0xc8*/ PTI_MODRM_FALSE,
+/*opcode 0xc9*/ PTI_MODRM_FALSE,
+/*opcode 0xca*/ PTI_MODRM_FALSE,
+/*opcode 0xcb*/ PTI_MODRM_FALSE,
+/*opcode 0xcc*/ PTI_MODRM_FALSE,
+/*opcode 0xcd*/ PTI_MODRM_FALSE,
+/*opcode 0xce*/ PTI_MODRM_FALSE,
+/*opcode 0xcf*/ PTI_MODRM_FALSE,
+/*opcode 0xd0*/ PTI_MODRM_TRUE,
+/*opcode 0xd1*/ PTI_MODRM_TRUE,
+/*opcode 0xd2*/ PTI_MODRM_TRUE,
+/*opcode 0xd3*/ PTI_MODRM_TRUE,
+/*opcode 0xd4*/ PTI_MODRM_FALSE,
+/*opcode 0xd5*/ PTI_MODRM_FALSE,
+/*opcode 0xd6*/ PTI_MODRM_FALSE,
+/*opcode 0xd7*/ PTI_MODRM_FALSE,
+/*opcode 0xd8*/ PTI_MODRM_TRUE,
+/*opcode 0xd9*/ PTI_MODRM_TRUE,
+/*opcode 0xda*/ PTI_MODRM_TRUE,
+/*opcode 0xdb*/ PTI_MODRM_TRUE,
+/*opcode 0xdc*/ PTI_MODRM_TRUE,
+/*opcode 0xdd*/ PTI_MODRM_TRUE,
+/*opcode 0xde*/ PTI_MODRM_TRUE,
+/*opcode 0xdf*/ PTI_MODRM_TRUE,
+/*opcode 0xe0*/ PTI_MODRM_FALSE,
+/*opcode 0xe1*/ PTI_MODRM_FALSE,
+/*opcode 0xe2*/ PTI_MODRM_FALSE,
+/*opcode 0xe3*/ PTI_MODRM_FALSE,
+/*opcode 0xe4*/ PTI_MODRM_FALSE,
+/*opcode 0xe5*/ PTI_MODRM_FALSE,
+/*opcode 0xe6*/ PTI_MODRM_FALSE,
+/*opcode 0xe7*/ PTI_MODRM_FALSE,
+/*opcode 0xe8*/ PTI_MODRM_FALSE,
+/*opcode 0xe9*/ PTI_MODRM_FALSE,
+/*opcode 0xea*/ PTI_MODRM_FALSE,
+/*opcode 0xeb*/ PTI_MODRM_FALSE,
+/*opcode 0xec*/ PTI_MODRM_FALSE,
+/*opcode 0xed*/ PTI_MODRM_FALSE,
+/*opcode 0xee*/ PTI_MODRM_FALSE,
+/*opcode 0xef*/ PTI_MODRM_FALSE,
+/*opcode 0xf0*/ PTI_MODRM_UNDEF,
+/*opcode 0xf1*/ PTI_MODRM_FALSE,
+/*opcode 0xf2*/ PTI_MODRM_UNDEF,
+/*opcode 0xf3*/ PTI_MODRM_UNDEF,
+/*opcode 0xf4*/ PTI_MODRM_FALSE,
+/*opcode 0xf5*/ PTI_MODRM_FALSE,
+/*opcode 0xf6*/ PTI_MODRM_TRUE,
+/*opcode 0xf7*/ PTI_MODRM_TRUE,
+/*opcode 0xf8*/ PTI_MODRM_FALSE,
+/*opcode 0xf9*/ PTI_MODRM_FALSE,
+/*opcode 0xfa*/ PTI_MODRM_FALSE,
+/*opcode 0xfb*/ PTI_MODRM_FALSE,
+/*opcode 0xfc*/ PTI_MODRM_FALSE,
+/*opcode 0xfd*/ PTI_MODRM_FALSE,
+/*opcode 0xfe*/ PTI_MODRM_TRUE,
+/*opcode 0xff*/ PTI_MODRM_TRUE,
+};
+static uint8_t has_modrm_map_0x0F[256] = {
+/*opcode 0x0*/ PTI_MODRM_TRUE,
+/*opcode 0x1*/ PTI_MODRM_TRUE,
+/*opcode 0x2*/ PTI_MODRM_TRUE,
+/*opcode 0x3*/ PTI_MODRM_TRUE,
+/*opcode 0x4*/ PTI_MODRM_UNDEF,
+/*opcode 0x5*/ PTI_MODRM_FALSE,
+/*opcode 0x6*/ PTI_MODRM_FALSE,
+/*opcode 0x7*/ PTI_MODRM_FALSE,
+/*opcode 0x8*/ PTI_MODRM_FALSE,
+/*opcode 0x9*/ PTI_MODRM_FALSE,
+/*opcode 0xa*/ PTI_MODRM_UNDEF,
+/*opcode 0xb*/ PTI_MODRM_FALSE,
+/*opcode 0xc*/ PTI_MODRM_UNDEF,
+/*opcode 0xd*/ PTI_MODRM_TRUE,
+/*opcode 0xe*/ PTI_MODRM_FALSE,
+/*opcode 0xf*/ PTI_MODRM_UNDEF,
+/*opcode 0x10*/ PTI_MODRM_TRUE,
+/*opcode 0x11*/ PTI_MODRM_TRUE,
+/*opcode 0x12*/ PTI_MODRM_TRUE,
+/*opcode 0x13*/ PTI_MODRM_TRUE,
+/*opcode 0x14*/ PTI_MODRM_TRUE,
+/*opcode 0x15*/ PTI_MODRM_TRUE,
+/*opcode 0x16*/ PTI_MODRM_TRUE,
+/*opcode 0x17*/ PTI_MODRM_TRUE,
+/*opcode 0x18*/ PTI_MODRM_TRUE,
+/*opcode 0x19*/ PTI_MODRM_TRUE,
+/*opcode 0x1a*/ PTI_MODRM_TRUE,
+/*opcode 0x1b*/ PTI_MODRM_TRUE,
+/*opcode 0x1c*/ PTI_MODRM_TRUE,
+/*opcode 0x1d*/ PTI_MODRM_TRUE,
+/*opcode 0x1e*/ PTI_MODRM_TRUE,
+/*opcode 0x1f*/ PTI_MODRM_TRUE,
+/*opcode 0x20*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x21*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x22*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x23*/ PTI_MODRM_IGNORE_MOD,
+/*opcode 0x24*/ PTI_MODRM_UNDEF,
+/*opcode 0x25*/ PTI_MODRM_UNDEF,
+/*opcode 0x26*/ PTI_MODRM_UNDEF,
+/*opcode 0x27*/ PTI_MODRM_UNDEF,
+/*opcode 0x28*/ PTI_MODRM_TRUE,
+/*opcode 0x29*/ PTI_MODRM_TRUE,
+/*opcode 0x2a*/ PTI_MODRM_TRUE,
+/*opcode 0x2b*/ PTI_MODRM_TRUE,
+/*opcode 0x2c*/ PTI_MODRM_TRUE,
+/*opcode 0x2d*/ PTI_MODRM_TRUE,
+/*opcode 0x2e*/ PTI_MODRM_TRUE,
+/*opcode 0x2f*/ PTI_MODRM_TRUE,
+/*opcode 0x30*/ PTI_MODRM_FALSE,
+/*opcode 0x31*/ PTI_MODRM_FALSE,
+/*opcode 0x32*/ PTI_MODRM_FALSE,
+/*opcode 0x33*/ PTI_MODRM_FALSE,
+/*opcode 0x34*/ PTI_MODRM_FALSE,
+/*opcode 0x35*/ PTI_MODRM_FALSE,
+/*opcode 0x36*/ PTI_MODRM_UNDEF,
+/*opcode 0x37*/ PTI_MODRM_FALSE,
+/*opcode 0x38*/ PTI_MODRM_UNDEF,
+/*opcode 0x39*/ PTI_MODRM_UNDEF,
+/*opcode 0x3a*/ PTI_MODRM_UNDEF,
+/*opcode 0x3b*/ PTI_MODRM_UNDEF,
+/*opcode 0x3c*/ PTI_MODRM_UNDEF,
+/*opcode 0x3d*/ PTI_MODRM_UNDEF,
+/*opcode 0x3e*/ PTI_MODRM_UNDEF,
+/*opcode 0x3f*/ PTI_MODRM_UNDEF,
+/*opcode 0x40*/ PTI_MODRM_TRUE,
+/*opcode 0x41*/ PTI_MODRM_TRUE,
+/*opcode 0x42*/ PTI_MODRM_TRUE,
+/*opcode 0x43*/ PTI_MODRM_TRUE,
+/*opcode 0x44*/ PTI_MODRM_TRUE,
+/*opcode 0x45*/ PTI_MODRM_TRUE,
+/*opcode 0x46*/ PTI_MODRM_TRUE,
+/*opcode 0x47*/ PTI_MODRM_TRUE,
+/*opcode 0x48*/ PTI_MODRM_TRUE,
+/*opcode 0x49*/ PTI_MODRM_TRUE,
+/*opcode 0x4a*/ PTI_MODRM_TRUE,
+/*opcode 0x4b*/ PTI_MODRM_TRUE,
+/*opcode 0x4c*/ PTI_MODRM_TRUE,
+/*opcode 0x4d*/ PTI_MODRM_TRUE,
+/*opcode 0x4e*/ PTI_MODRM_TRUE,
+/*opcode 0x4f*/ PTI_MODRM_TRUE,
+/*opcode 0x50*/ PTI_MODRM_TRUE,
+/*opcode 0x51*/ PTI_MODRM_TRUE,
+/*opcode 0x52*/ PTI_MODRM_TRUE,
+/*opcode 0x53*/ PTI_MODRM_TRUE,
+/*opcode 0x54*/ PTI_MODRM_TRUE,
+/*opcode 0x55*/ PTI_MODRM_TRUE,
+/*opcode 0x56*/ PTI_MODRM_TRUE,
+/*opcode 0x57*/ PTI_MODRM_TRUE,
+/*opcode 0x58*/ PTI_MODRM_TRUE,
+/*opcode 0x59*/ PTI_MODRM_TRUE,
+/*opcode 0x5a*/ PTI_MODRM_TRUE,
+/*opcode 0x5b*/ PTI_MODRM_TRUE,
+/*opcode 0x5c*/ PTI_MODRM_TRUE,
+/*opcode 0x5d*/ PTI_MODRM_TRUE,
+/*opcode 0x5e*/ PTI_MODRM_TRUE,
+/*opcode 0x5f*/ PTI_MODRM_TRUE,
+/*opcode 0x60*/ PTI_MODRM_TRUE,
+/*opcode 0x61*/ PTI_MODRM_TRUE,
+/*opcode 0x62*/ PTI_MODRM_TRUE,
+/*opcode 0x63*/ PTI_MODRM_TRUE,
+/*opcode 0x64*/ PTI_MODRM_TRUE,
+/*opcode 0x65*/ PTI_MODRM_TRUE,
+/*opcode 0x66*/ PTI_MODRM_TRUE,
+/*opcode 0x67*/ PTI_MODRM_TRUE,
+/*opcode 0x68*/ PTI_MODRM_TRUE,
+/*opcode 0x69*/ PTI_MODRM_TRUE,
+/*opcode 0x6a*/ PTI_MODRM_TRUE,
+/*opcode 0x6b*/ PTI_MODRM_TRUE,
+/*opcode 0x6c*/ PTI_MODRM_TRUE,
+/*opcode 0x6d*/ PTI_MODRM_TRUE,
+/*opcode 0x6e*/ PTI_MODRM_TRUE,
+/*opcode 0x6f*/ PTI_MODRM_TRUE,
+/*opcode 0x70*/ PTI_MODRM_TRUE,
+/*opcode 0x71*/ PTI_MODRM_TRUE,
+/*opcode 0x72*/ PTI_MODRM_TRUE,
+/*opcode 0x73*/ PTI_MODRM_TRUE,
+/*opcode 0x74*/ PTI_MODRM_TRUE,
+/*opcode 0x75*/ PTI_MODRM_TRUE,
+/*opcode 0x76*/ PTI_MODRM_TRUE,
+/*opcode 0x77*/ PTI_MODRM_FALSE,
+/*opcode 0x78*/ PTI_MODRM_TRUE,
+/*opcode 0x79*/ PTI_MODRM_TRUE,
+/*opcode 0x7a*/ PTI_MODRM_TRUE,
+/*opcode 0x7b*/ PTI_MODRM_TRUE,
+/*opcode 0x7c*/ PTI_MODRM_TRUE,
+/*opcode 0x7d*/ PTI_MODRM_TRUE,
+/*opcode 0x7e*/ PTI_MODRM_TRUE,
+/*opcode 0x7f*/ PTI_MODRM_TRUE,
+/*opcode 0x80*/ PTI_MODRM_FALSE,
+/*opcode 0x81*/ PTI_MODRM_FALSE,
+/*opcode 0x82*/ PTI_MODRM_FALSE,
+/*opcode 0x83*/ PTI_MODRM_FALSE,
+/*opcode 0x84*/ PTI_MODRM_FALSE,
+/*opcode 0x85*/ PTI_MODRM_FALSE,
+/*opcode 0x86*/ PTI_MODRM_FALSE,
+/*opcode 0x87*/ PTI_MODRM_FALSE,
+/*opcode 0x88*/ PTI_MODRM_FALSE,
+/*opcode 0x89*/ PTI_MODRM_FALSE,
+/*opcode 0x8a*/ PTI_MODRM_FALSE,
+/*opcode 0x8b*/ PTI_MODRM_FALSE,
+/*opcode 0x8c*/ PTI_MODRM_FALSE,
+/*opcode 0x8d*/ PTI_MODRM_FALSE,
+/*opcode 0x8e*/ PTI_MODRM_FALSE,
+/*opcode 0x8f*/ PTI_MODRM_FALSE,
+/*opcode 0x90*/ PTI_MODRM_TRUE,
+/*opcode 0x91*/ PTI_MODRM_TRUE,
+/*opcode 0x92*/ PTI_MODRM_TRUE,
+/*opcode 0x93*/ PTI_MODRM_TRUE,
+/*opcode 0x94*/ PTI_MODRM_TRUE,
+/*opcode 0x95*/ PTI_MODRM_TRUE,
+/*opcode 0x96*/ PTI_MODRM_TRUE,
+/*opcode 0x97*/ PTI_MODRM_TRUE,
+/*opcode 0x98*/ PTI_MODRM_TRUE,
+/*opcode 0x99*/ PTI_MODRM_TRUE,
+/*opcode 0x9a*/ PTI_MODRM_TRUE,
+/*opcode 0x9b*/ PTI_MODRM_TRUE,
+/*opcode 0x9c*/ PTI_MODRM_TRUE,
+/*opcode 0x9d*/ PTI_MODRM_TRUE,
+/*opcode 0x9e*/ PTI_MODRM_TRUE,
+/*opcode 0x9f*/ PTI_MODRM_TRUE,
+/*opcode 0xa0*/ PTI_MODRM_FALSE,
+/*opcode 0xa1*/ PTI_MODRM_FALSE,
+/*opcode 0xa2*/ PTI_MODRM_FALSE,
+/*opcode 0xa3*/ PTI_MODRM_TRUE,
+/*opcode 0xa4*/ PTI_MODRM_TRUE,
+/*opcode 0xa5*/ PTI_MODRM_TRUE,
+/*opcode 0xa6*/ PTI_MODRM_UNDEF,
+/*opcode 0xa7*/ PTI_MODRM_UNDEF,
+/*opcode 0xa8*/ PTI_MODRM_FALSE,
+/*opcode 0xa9*/ PTI_MODRM_FALSE,
+/*opcode 0xaa*/ PTI_MODRM_FALSE,
+/*opcode 0xab*/ PTI_MODRM_TRUE,
+/*opcode 0xac*/ PTI_MODRM_TRUE,
+/*opcode 0xad*/ PTI_MODRM_TRUE,
+/*opcode 0xae*/ PTI_MODRM_TRUE,
+/*opcode 0xaf*/ PTI_MODRM_TRUE,
+/*opcode 0xb0*/ PTI_MODRM_TRUE,
+/*opcode 0xb1*/ PTI_MODRM_TRUE,
+/*opcode 0xb2*/ PTI_MODRM_TRUE,
+/*opcode 0xb3*/ PTI_MODRM_TRUE,
+/*opcode 0xb4*/ PTI_MODRM_TRUE,
+/*opcode 0xb5*/ PTI_MODRM_TRUE,
+/*opcode 0xb6*/ PTI_MODRM_TRUE,
+/*opcode 0xb7*/ PTI_MODRM_TRUE,
+/*opcode 0xb8*/ PTI_MODRM_TRUE,
+/*opcode 0xb9*/ PTI_MODRM_UNDEF,
+/*opcode 0xba*/ PTI_MODRM_TRUE,
+/*opcode 0xbb*/ PTI_MODRM_TRUE,
+/*opcode 0xbc*/ PTI_MODRM_TRUE,
+/*opcode 0xbd*/ PTI_MODRM_TRUE,
+/*opcode 0xbe*/ PTI_MODRM_TRUE,
+/*opcode 0xbf*/ PTI_MODRM_TRUE,
+/*opcode 0xc0*/ PTI_MODRM_TRUE,
+/*opcode 0xc1*/ PTI_MODRM_TRUE,
+/*opcode 0xc2*/ PTI_MODRM_TRUE,
+/*opcode 0xc3*/ PTI_MODRM_TRUE,
+/*opcode 0xc4*/ PTI_MODRM_TRUE,
+/*opcode 0xc5*/ PTI_MODRM_TRUE,
+/*opcode 0xc6*/ PTI_MODRM_TRUE,
+/*opcode 0xc7*/ PTI_MODRM_TRUE,
+/*opcode 0xc8*/ PTI_MODRM_FALSE,
+/*opcode 0xc9*/ PTI_MODRM_FALSE,
+/*opcode 0xca*/ PTI_MODRM_FALSE,
+/*opcode 0xcb*/ PTI_MODRM_FALSE,
+/*opcode 0xcc*/ PTI_MODRM_FALSE,
+/*opcode 0xcd*/ PTI_MODRM_FALSE,
+/*opcode 0xce*/ PTI_MODRM_FALSE,
+/*opcode 0xcf*/ PTI_MODRM_FALSE,
+/*opcode 0xd0*/ PTI_MODRM_TRUE,
+/*opcode 0xd1*/ PTI_MODRM_TRUE,
+/*opcode 0xd2*/ PTI_MODRM_TRUE,
+/*opcode 0xd3*/ PTI_MODRM_TRUE,
+/*opcode 0xd4*/ PTI_MODRM_TRUE,
+/*opcode 0xd5*/ PTI_MODRM_TRUE,
+/*opcode 0xd6*/ PTI_MODRM_TRUE,
+/*opcode 0xd7*/ PTI_MODRM_TRUE,
+/*opcode 0xd8*/ PTI_MODRM_TRUE,
+/*opcode 0xd9*/ PTI_MODRM_TRUE,
+/*opcode 0xda*/ PTI_MODRM_TRUE,
+/*opcode 0xdb*/ PTI_MODRM_TRUE,
+/*opcode 0xdc*/ PTI_MODRM_TRUE,
+/*opcode 0xdd*/ PTI_MODRM_TRUE,
+/*opcode 0xde*/ PTI_MODRM_TRUE,
+/*opcode 0xdf*/ PTI_MODRM_TRUE,
+/*opcode 0xe0*/ PTI_MODRM_TRUE,
+/*opcode 0xe1*/ PTI_MODRM_TRUE,
+/*opcode 0xe2*/ PTI_MODRM_TRUE,
+/*opcode 0xe3*/ PTI_MODRM_TRUE,
+/*opcode 0xe4*/ PTI_MODRM_TRUE,
+/*opcode 0xe5*/ PTI_MODRM_TRUE,
+/*opcode 0xe6*/ PTI_MODRM_TRUE,
+/*opcode 0xe7*/ PTI_MODRM_TRUE,
+/*opcode 0xe8*/ PTI_MODRM_TRUE,
+/*opcode 0xe9*/ PTI_MODRM_TRUE,
+/*opcode 0xea*/ PTI_MODRM_TRUE,
+/*opcode 0xeb*/ PTI_MODRM_TRUE,
+/*opcode 0xec*/ PTI_MODRM_TRUE,
+/*opcode 0xed*/ PTI_MODRM_TRUE,
+/*opcode 0xee*/ PTI_MODRM_TRUE,
+/*opcode 0xef*/ PTI_MODRM_TRUE,
+/*opcode 0xf0*/ PTI_MODRM_TRUE,
+/*opcode 0xf1*/ PTI_MODRM_TRUE,
+/*opcode 0xf2*/ PTI_MODRM_TRUE,
+/*opcode 0xf3*/ PTI_MODRM_TRUE,
+/*opcode 0xf4*/ PTI_MODRM_TRUE,
+/*opcode 0xf5*/ PTI_MODRM_TRUE,
+/*opcode 0xf6*/ PTI_MODRM_TRUE,
+/*opcode 0xf7*/ PTI_MODRM_TRUE,
+/*opcode 0xf8*/ PTI_MODRM_TRUE,
+/*opcode 0xf9*/ PTI_MODRM_TRUE,
+/*opcode 0xfa*/ PTI_MODRM_TRUE,
+/*opcode 0xfb*/ PTI_MODRM_TRUE,
+/*opcode 0xfc*/ PTI_MODRM_TRUE,
+/*opcode 0xfd*/ PTI_MODRM_TRUE,
+/*opcode 0xfe*/ PTI_MODRM_TRUE,
+/*opcode 0xff*/ PTI_MODRM_UNDEF,
+};
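
As with the immediate tables, lookup is a table select plus an index. A minimal sketch under the same assumptions (hypothetical function name, tables in scope):

static int pti_has_modrm_sketch(int map_0x0F, uint8_t opcode)
{
	uint8_t state = map_0x0F ? has_modrm_map_0x0F[opcode]
				 : has_modrm_map_0x0[opcode];

	/* Both TRUE and IGNORE_MOD mean a ModRM byte follows the
	 * opcode and must be counted toward the instruction length.
	 */
	return (state == PTI_MODRM_TRUE) || (state == PTI_MODRM_IGNORE_MOD);
}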
diff --git a/contrib/processor-trace/libipt/internal/include/windows/pt_section_windows.h b/contrib/processor-trace/libipt/internal/include/windows/pt_section_windows.h
new file mode 100644
index 0000000000000..96b101532a9ba
--- /dev/null
+++ b/contrib/processor-trace/libipt/internal/include/windows/pt_section_windows.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PT_SECTION_WINDOWS_H
+#define PT_SECTION_WINDOWS_H
+
+#include <windows.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+struct pt_section;
+
+
+/* Fstat-based file status. */
+struct pt_sec_windows_status {
+ /* The file status. */
+ struct _stat stat;
+};
+
+/* FileView-based section mapping information. */
+struct pt_sec_windows_mapping {
+ /* The file descriptor. */
+ int fd;
+
+ /* The FileMapping handle. */
+ HANDLE mh;
+
+ /* The mmap base address. */
+ uint8_t *base;
+
+ /* The begin and end of the mapped memory. */
+ const uint8_t *begin, *end;
+};
+
+
+/* Map a section.
+ *
+ * The caller has already opened the file for reading.
+ *
+ * On success, sets @section's mapping, unmap, and read pointers.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_invalid if @section can't be mapped.
+ */
+extern int pt_sec_windows_map(struct pt_section *section, int fd);
+
+/* Unmap a section.
+ *
+ * On success, clears @section's mapping, unmap, and read pointers.
+ *
+ * This function should not be called directly; call @section->unmap() instead.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_windows_unmap(struct pt_section *section);
+
+/* Read memory from a memory-mapped section.
+ *
+ * Reads at most @size bytes from @section at @offset into @buffer.
+ *
+ * This function should not be called directly; call @section->read() instead.
+ *
+ * Returns the number of bytes read on success, a negative error code otherwise.
+ * Returns -pte_invalid if @section or @buffer are NULL.
+ * Returns -pte_nomap if @offset is beyond the end of the section.
+ */
+extern int pt_sec_windows_read(const struct pt_section *section,
+ uint8_t *buffer, uint16_t size,
+ uint64_t offset);
+
+/* Compute the memory size of a section.
+ *
+ * On success, provides the amount of memory used for mapping @section in bytes
+ * in @size.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_internal if @section or @size is NULL.
+ * Returns -pte_internal if @section has not been mapped.
+ */
+extern int pt_sec_windows_memsize(const struct pt_section *section,
+ uint64_t *size);
+
+#endif /* PT_SECTION_WINDOWS_H */
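
A sketch of the intended call sequence, for illustration only: real callers go through pt_section_map(), which handles locking and reference counting, and the field and pointer types are assumed to follow pt_section.h.

static int read_first_bytes(struct pt_section *section, int fd,
			    uint8_t *buffer, uint16_t size)
{
	int status, bytes;

	/* On success, installs @section's mapping, unmap, and read
	 * pointers as documented above.
	 */
	status = pt_sec_windows_map(section, fd);
	if (status < 0)
		return status;

	/* Read @size bytes from the start of the section. */
	bytes = section->read(section, buffer, size, 0ull);

	status = section->unmap(section);

	return (bytes < 0) ? bytes : (status < 0) ? status : bytes;
}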
diff --git a/contrib/processor-trace/libipt/src/posix/init.c b/contrib/processor-trace/libipt/src/posix/init.c
new file mode 100644
index 0000000000000..dc20a432b02af
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/posix/init.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_ild.h"
+
+
+static void __attribute__((constructor)) init(void)
+{
+ /* Initialize the Intel(R) Processor Trace instruction decoder. */
+ pt_ild_init();
+}
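
The GCC constructor attribute runs pt_ild_init() when the library is loaded, so decoders never see uninitialized tables. For toolchains without constructor support, an explicit, idempotent initializer would serve the same purpose (a sketch under that assumption; not part of the import):

#include "pt_ild.h"

static int pt_ild_ready;

/* Call once, from a single thread, before creating the first decoder
 * on platforms that lack __attribute__((constructor)).
 */
static void ensure_pt_ild_init(void)
{
	if (!pt_ild_ready) {
		pt_ild_init();
		pt_ild_ready = 1;
	}
}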
diff --git a/contrib/processor-trace/libipt/src/posix/pt_cpuid.c b/contrib/processor-trace/libipt/src/posix/pt_cpuid.c
new file mode 100644
index 0000000000000..0ca755dd3fe9e
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/posix/pt_cpuid.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpuid.h"
+
+#include <cpuid.h>
+
+extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ __get_cpuid(leaf, eax, ebx, ecx, edx);
+}
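
A usage sketch (assumed, for illustration): CPUID leaf 0 returns the highest supported leaf in eax and the vendor string spread across ebx, edx, ecx, which is how the library identifies the processor.

#include <stdint.h>
#include <string.h>

/* Fill @vendor with the 12-character CPU vendor string plus a
 * terminating NUL (e.g. "GenuineIntel").
 */
static void read_vendor_sketch(char vendor[13])
{
	uint32_t eax, ebx, ecx, edx;

	pt_cpuid(0u, &eax, &ebx, &ecx, &edx);

	/* The vendor string is returned in ebx, edx, ecx order. */
	memcpy(vendor + 0, &ebx, 4);
	memcpy(vendor + 4, &edx, 4);
	memcpy(vendor + 8, &ecx, 4);
	vendor[12] = '\0';
}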
diff --git a/contrib/processor-trace/libipt/src/posix/pt_section_posix.c b/contrib/processor-trace/libipt/src/posix/pt_section_posix.c
new file mode 100644
index 0000000000000..392ce4ecc6bb1
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/posix/pt_section_posix.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_posix.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+
+int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename)
+{
+ struct pt_sec_posix_status *status;
+ struct stat buffer;
+ int errcode;
+
+ if (!pstatus || !psize)
+ return -pte_internal;
+
+ errcode = stat(filename, &buffer);
+ if (errcode < 0)
+ return errcode;
+
+ if (buffer.st_size < 0)
+ return -pte_bad_image;
+
+ status = malloc(sizeof(*status));
+ if (!status)
+ return -pte_nomem;
+
+ status->stat = buffer;
+
+ *pstatus = status;
+ *psize = buffer.st_size;
+
+ return 0;
+}
+
+static int check_file_status(struct pt_section *section, int fd)
+{
+ struct pt_sec_posix_status *status;
+ struct stat stat;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = fstat(fd, &stat);
+ if (errcode)
+ return -pte_bad_image;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ if (stat.st_size != status->stat.st_size)
+ return -pte_bad_image;
+
+ if (stat.st_mtime != status->stat.st_mtime)
+ return -pte_bad_image;
+
+ return 0;
+}
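+
+/* Note that the size/mtime comparison above is a heuristic: it catches the
+ * common case of the file being replaced or truncated between the initial
+ * pt_section_mk_status() call and a later mapping, not every conceivable
+ * modification of the file's contents.
+ */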
+
+int pt_sec_posix_map(struct pt_section *section, int fd)
+{
+ struct pt_sec_posix_mapping *mapping;
+ uint64_t offset, size, adjustment;
+ uint8_t *base;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ offset = section->offset;
+ size = section->size;
+
+ adjustment = offset % sysconf(_SC_PAGESIZE);
+
+ offset -= adjustment;
+ size += adjustment;
+
+	/* The section is supposed to fit into the file, so we shouldn't
+	 * see any overflows here.
+	 */
+ if (size < section->size)
+ return -pte_internal;
+
+ if (SIZE_MAX < size)
+ return -pte_nomem;
+
+ if (INT_MAX < offset)
+ return -pte_nomem;
+
+ base = mmap(NULL, (size_t) size, PROT_READ, MAP_SHARED, fd,
+ (off_t) offset);
+ if (base == MAP_FAILED)
+ return -pte_nomem;
+
+ mapping = malloc(sizeof(*mapping));
+ if (!mapping) {
+ errcode = -pte_nomem;
+ goto out_map;
+ }
+
+ mapping->base = base;
+ mapping->size = size;
+ mapping->begin = base + adjustment;
+ mapping->end = base + size;
+
+ section->mapping = mapping;
+ section->unmap = pt_sec_posix_unmap;
+ section->read = pt_sec_posix_read;
+ section->memsize = pt_sec_posix_memsize;
+
+ return 0;
+
+out_map:
+ munmap(base, (size_t) size);
+ return errcode;
+}
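+
+/* A worked example of the alignment arithmetic above, assuming a 4 KiB page
+ * size and hypothetical numbers:
+ *
+ *	section->offset = 0x1234, section->size = 0x0100
+ *	adjustment	= 0x1234 % 0x1000 = 0x0234
+ *	offset		= 0x1234 - 0x0234 = 0x1000	(page-aligned)
+ *	size		= 0x0100 + 0x0234 = 0x0334
+ *	mapping->begin	= base + 0x0234		(first byte of the section)
+ *	mapping->end	= base + 0x0334		(one past the last byte)
+ */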
+
+static int pt_sec_posix_map_success(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_on_map(section);
+ if (status < 0) {
+ /* We had to release the section lock for pt_section_on_map() so
+ * @section may have meanwhile been mapped by other threads.
+ *
+ * We still want to return the error so we release our mapping.
+ * Our caller does not yet know whether pt_section_map()
+ * succeeded.
+ */
+ (void) pt_section_unmap(section);
+ return status;
+ }
+
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ const char *filename;
+ FILE *file;
+ int fd, errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->mcount)
+ return pt_sec_posix_map_success(section);
+
+ if (section->mapping)
+ goto out_unlock;
+
+ filename = section->filename;
+ if (!filename)
+ goto out_unlock;
+
+ errcode = -pte_bad_image;
+ fd = open(filename, O_RDONLY);
+ if (fd == -1)
+ goto out_unlock;
+
+ errcode = check_file_status(section, fd);
+ if (errcode < 0)
+ goto out_fd;
+
+ /* We close the file on success. This does not unmap the section. */
+ errcode = pt_sec_posix_map(section, fd);
+ if (!errcode) {
+ close(fd);
+
+ return pt_sec_posix_map_success(section);
+ }
+
+ /* Fall back to file based sections - report the original error
+ * if we fail to convert the file descriptor.
+ */
+ file = fdopen(fd, "rb");
+ if (!file)
+ goto out_fd;
+
+ /* We need to keep the file open on success. It will be closed when
+ * the section is unmapped.
+ */
+ errcode = pt_sec_file_map(section, file);
+ if (!errcode)
+ return pt_sec_posix_map_success(section);
+
+ fclose(file);
+ goto out_unlock;
+
+out_fd:
+ close(fd);
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
+
+int pt_sec_posix_unmap(struct pt_section *section)
+{
+ struct pt_sec_posix_mapping *mapping;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping || !section->unmap || !section->read || !section->memsize)
+ return -pte_internal;
+
+ section->mapping = NULL;
+ section->unmap = NULL;
+ section->read = NULL;
+ section->memsize = NULL;
+
+ munmap(mapping->base, (size_t) mapping->size);
+ free(mapping);
+
+ return 0;
+}
+
+int pt_sec_posix_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct pt_sec_posix_mapping *mapping;
+ const uint8_t *begin;
+
+ if (!buffer || !section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ /* We already checked in pt_section_read() that the requested memory
+ * lies within the section's boundaries.
+ *
+ * And we checked that the entire section was mapped. There's no need
+ * to check for overflows, again.
+ */
+ begin = mapping->begin + offset;
+
+ memcpy(buffer, begin, size);
+ return (int) size;
+}
+
+int pt_sec_posix_memsize(const struct pt_section *section, uint64_t *size)
+{
+ struct pt_sec_posix_mapping *mapping;
+ const uint8_t *begin, *end;
+
+ if (!section || !size)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ begin = mapping->base;
+ end = mapping->end;
+
+ if (!begin || !end || end < begin)
+ return -pte_internal;
+
+ *size = (uint64_t) (end - begin);
+
+ return 0;
+}
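+
+/* A minimal usage sketch for the mapping interface in this file, assuming a
+ * section created via pt_mk_section() (see pt_section.c) and ignoring error
+ * handling:
+ *
+ *	uint8_t buffer[16];
+ *	int status;
+ *
+ *	status = pt_section_map(section);
+ *	status = pt_sec_posix_read(section, buffer, sizeof(buffer), 0ull);
+ *	status = pt_section_unmap(section);
+ */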
diff --git a/contrib/processor-trace/libipt/src/pt_asid.c b/contrib/processor-trace/libipt/src/pt_asid.c
new file mode 100644
index 0000000000000..f492e0f7fd677
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_asid.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_asid.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+int pt_asid_from_user(struct pt_asid *asid, const struct pt_asid *user)
+{
+ if (!asid)
+ return -pte_internal;
+
+ pt_asid_init(asid);
+
+ if (user) {
+ size_t size;
+
+ size = user->size;
+
+ /* Ignore fields in the user's asid we don't know. */
+ if (sizeof(*asid) < size)
+ size = sizeof(*asid);
+
+ /* Copy (portions of) the user's asid. */
+ memcpy(asid, user, size);
+
+ /* We copied user's size - fix it. */
+ asid->size = sizeof(*asid);
+ }
+
+ return 0;
+}
+
+int pt_asid_to_user(struct pt_asid *user, const struct pt_asid *asid,
+ size_t size)
+{
+ if (!user || !asid)
+ return -pte_internal;
+
+ /* We need at least space for the size field. */
+ if (size < sizeof(asid->size))
+ return -pte_invalid;
+
+ /* Only provide the fields we actually have. */
+ if (sizeof(*asid) < size)
+ size = sizeof(*asid);
+
+ /* Copy (portions of) our asid to the user's. */
+ memcpy(user, asid, size);
+
+ /* We copied our size - fix it. */
+ user->size = size;
+
+ return 0;
+}
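+
+/* A minimal sketch of the size negotiation above: callers announce the
+ * struct size they were compiled against, so older callers with a smaller
+ * struct pt_asid exchange only the fields they know about.  With a
+ * hypothetical cr3 value @my_cr3 and a library-side @resolved:
+ *
+ *	struct pt_asid asid;
+ *
+ *	pt_asid_init(&asid);
+ *	asid.cr3 = my_cr3;
+ *
+ *	errcode = pt_asid_from_user(&resolved, &asid);
+ */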
+
+int pt_asid_match(const struct pt_asid *lhs, const struct pt_asid *rhs)
+{
+ uint64_t lcr3, rcr3, lvmcs, rvmcs;
+
+ if (!lhs || !rhs)
+ return -pte_internal;
+
+ lcr3 = lhs->cr3;
+ rcr3 = rhs->cr3;
+
+ if (lcr3 != rcr3 && lcr3 != pt_asid_no_cr3 && rcr3 != pt_asid_no_cr3)
+ return 0;
+
+ lvmcs = lhs->vmcs;
+ rvmcs = rhs->vmcs;
+
+ if (lvmcs != rvmcs && lvmcs != pt_asid_no_vmcs &&
+ rvmcs != pt_asid_no_vmcs)
+ return 0;
+
+ return 1;
+}
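+
+/* The matching rules above, sketched with hypothetical values:
+ *
+ *	lhs.cr3 = 0x1000, rhs.cr3 = pt_asid_no_cr3	=> match (wildcard)
+ *	lhs.cr3 = 0x1000, rhs.cr3 = 0x1000		=> match
+ *	lhs.cr3 = 0x1000, rhs.cr3 = 0x2000		=> no match
+ *
+ * The vmcs fields are compared the same way; both comparisons must match.
+ */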
diff --git a/contrib/processor-trace/libipt/src/pt_block_cache.c b/contrib/processor-trace/libipt/src/pt_block_cache.c
new file mode 100644
index 0000000000000..abe6ea1f3ca2e
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_block_cache.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_block_cache.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+struct pt_block_cache *pt_bcache_alloc(uint64_t nentries)
+{
+ struct pt_block_cache *bcache;
+ uint64_t size;
+
+ if (!nentries || (UINT32_MAX < nentries))
+ return NULL;
+
+ size = sizeof(*bcache) + (nentries * sizeof(struct pt_bcache_entry));
+ if (SIZE_MAX < size)
+ return NULL;
+
+ bcache = malloc((size_t) size);
+ if (!bcache)
+ return NULL;
+
+ memset(bcache, 0, (size_t) size);
+ bcache->nentries = (uint32_t) nentries;
+
+ return bcache;
+}
+
+void pt_bcache_free(struct pt_block_cache *bcache)
+{
+ free(bcache);
+}
+
+int pt_bcache_add(struct pt_block_cache *bcache, uint64_t index,
+ struct pt_bcache_entry bce)
+{
+ if (!bcache)
+ return -pte_internal;
+
+ if (bcache->nentries <= index)
+ return -pte_internal;
+
+ /* We rely on guaranteed atomic operations as specified in section 8.1.1
+ * in Volume 3A of the Intel(R) Software Developer's Manual at
+ * http://www.intel.com/sdm.
+ */
+ bcache->entry[(uint32_t) index] = bce;
+
+ return 0;
+}
+
+int pt_bcache_lookup(struct pt_bcache_entry *bce,
+ const struct pt_block_cache *bcache, uint64_t index)
+{
+ if (!bce || !bcache)
+ return -pte_internal;
+
+ if (bcache->nentries <= index)
+ return -pte_internal;
+
+ /* We rely on guaranteed atomic operations as specified in section 8.1.1
+ * in Volume 3A of the Intel(R) Software Developer's Manual at
+ * http://www.intel.com/sdm.
+ */
+ *bce = bcache->entry[(uint32_t) index];
+
+ return 0;
+}
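+
+/* A minimal usage sketch for the cache, with hypothetical @nentries and
+ * @index and without error handling:
+ *
+ *	struct pt_block_cache *bcache;
+ *	struct pt_bcache_entry bce;
+ *
+ *	bcache = pt_bcache_alloc(nentries);
+ *	memset(&bce, 0, sizeof(bce));
+ *	bce.ninsn = 1;
+ *	errcode = pt_bcache_add(bcache, index, bce);
+ *	errcode = pt_bcache_lookup(&bce, bcache, index);
+ *	pt_bcache_free(bcache);
+ */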
diff --git a/contrib/processor-trace/libipt/src/pt_block_decoder.c b/contrib/processor-trace/libipt/src/pt_block_decoder.c
new file mode 100644
index 0000000000000..d6c816db65cc0
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_block_decoder.c
@@ -0,0 +1,3469 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_block_decoder.h"
+#include "pt_block_cache.h"
+#include "pt_section.h"
+#include "pt_image.h"
+#include "pt_insn.h"
+#include "pt_config.h"
+#include "pt_asid.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+static int pt_blk_proceed_trailing_event(struct pt_block_decoder *,
+ struct pt_block *);
+
+
+static int pt_blk_status(const struct pt_block_decoder *decoder, int flags)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = decoder->status;
+
+ /* Indicate whether tracing is disabled or enabled.
+ *
+ * This duplicates the indication in struct pt_insn and covers the case
+ * where we indicate the status after synchronizing.
+ */
+ if (!decoder->enabled)
+ flags |= pts_ip_suppressed;
+
+ /* Forward end-of-trace indications.
+ *
+ * Postpone it as long as we're still processing events, though.
+ */
+ if ((status & pts_eos) && !decoder->process_event)
+ flags |= pts_eos;
+
+ return flags;
+}
+
+static void pt_blk_reset(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ decoder->mode = ptem_unknown;
+ decoder->ip = 0ull;
+ decoder->status = 0;
+ decoder->enabled = 0;
+ decoder->process_event = 0;
+ decoder->speculative = 0;
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ memset(&decoder->event, 0, sizeof(decoder->event));
+ pt_retstack_init(&decoder->retstack);
+ pt_asid_init(&decoder->asid);
+}
+
+/* Initialize the query decoder flags based on our flags. */
+
+static int pt_blk_init_qry_flags(struct pt_conf_flags *qflags,
+ const struct pt_conf_flags *flags)
+{
+ if (!qflags || !flags)
+ return -pte_internal;
+
+ memset(qflags, 0, sizeof(*qflags));
+
+ return 0;
+}
+
+int pt_blk_decoder_init(struct pt_block_decoder *decoder,
+ const struct pt_config *uconfig)
+{
+ struct pt_config config;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_config_from_user(&config, uconfig);
+ if (errcode < 0)
+ return errcode;
+
+ /* The user supplied decoder flags. */
+ decoder->flags = config.flags;
+
+ /* Set the flags we need for the query decoder we use. */
+ errcode = pt_blk_init_qry_flags(&config.flags, &decoder->flags);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_decoder_init(&decoder->query, &config);
+ if (errcode < 0)
+ return errcode;
+
+ pt_image_init(&decoder->default_image, NULL);
+ decoder->image = &decoder->default_image;
+
+ errcode = pt_msec_cache_init(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ pt_blk_reset(decoder);
+
+ return 0;
+}
+
+void pt_blk_decoder_fini(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_msec_cache_fini(&decoder->scache);
+ pt_image_fini(&decoder->default_image);
+ pt_qry_decoder_fini(&decoder->query);
+}
+
+struct pt_block_decoder *
+pt_blk_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_block_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_blk_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_blk_free_decoder(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_blk_decoder_fini(decoder);
+ free(decoder);
+}
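+
+/* A minimal decoder lifecycle sketch, assuming a configured struct pt_config
+ * (see pt_config.c) and ignoring error handling:
+ *
+ *	struct pt_block_decoder *decoder;
+ *	int status;
+ *
+ *	decoder = pt_blk_alloc_decoder(&config);
+ *	status = pt_blk_sync_forward(decoder);
+ *	...
+ *	pt_blk_free_decoder(decoder);
+ */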
+
+/* Maybe synthesize a tick event.
+ *
+ * If we're not already processing events, check the current time against the
+ * last event's time. If it changed, synthesize a tick event with the new time.
+ *
+ * Returns zero if no tick event has been created.
+ * Returns a positive integer if a tick event has been created.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_tick(struct pt_block_decoder *decoder, uint64_t ip)
+{
+ struct pt_event *ev;
+ uint64_t tsc;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* We're not generating tick events if tracing is disabled. */
+ if (!decoder->enabled)
+ return -pte_internal;
+
+ /* Events already provide a timestamp so there is no need to synthesize
+ * an artificial tick event. There's no room, either, since this would
+ * overwrite the in-progress event.
+ *
+ * In rare cases where we need to proceed to an event location using
+ * trace this may cause us to miss a timing update if the event is not
+ * forwarded to the user.
+ *
+ * The only case I can come up with at the moment is a MODE.EXEC binding
+ * to the TIP IP of a far branch.
+ */
+ if (decoder->process_event)
+ return 0;
+
+ errcode = pt_qry_time(&decoder->query, &tsc, &lost_mtc, &lost_cyc);
+ if (errcode < 0) {
+ /* If we don't have wall-clock time, we use relative time. */
+ if (errcode != -pte_no_time)
+ return errcode;
+ }
+
+ ev = &decoder->event;
+
+ /* We're done if time has not changed since the last event. */
+ if (tsc == ev->tsc)
+ return 0;
+
+ /* Time has changed so we create a new tick event. */
+ memset(ev, 0, sizeof(*ev));
+ ev->type = ptev_tick;
+ ev->variant.tick.ip = ip;
+
+ /* Indicate if we have wall-clock time or only relative time. */
+ if (errcode != -pte_no_time)
+ ev->has_tsc = 1;
+ ev->tsc = tsc;
+ ev->lost_mtc = lost_mtc;
+ ev->lost_cyc = lost_cyc;
+
+ /* We now have an event to process. */
+ decoder->process_event = 1;
+
+ return 1;
+}
+
+/* Query an indirect branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_indirect_branch(struct pt_block_decoder *decoder,
+ uint64_t *ip)
+{
+ uint64_t evip;
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ evip = decoder->ip;
+
+ status = pt_qry_indirect_branch(&decoder->query, ip);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.block.enable_tick_events) {
+ errcode = pt_blk_tick(decoder, evip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+/* Query a conditional branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_cond_branch(struct pt_block_decoder *decoder, int *taken)
+{
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_cond_branch(&decoder->query, taken);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.block.enable_tick_events) {
+ errcode = pt_blk_tick(decoder, decoder->ip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+static int pt_blk_start(struct pt_block_decoder *decoder, int status)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+ if (!(status & pts_ip_suppressed))
+ decoder->enabled = 1;
+
+ /* We will always have an event.
+ *
+ * If we synchronized onto an empty PSB+, tracing is disabled and we'll
+ * process events until the enabled event.
+ *
+ * If tracing is enabled, PSB+ must at least provide the execution mode,
+ * which we're going to forward to the user.
+ */
+ return pt_blk_proceed_trailing_event(decoder, NULL);
+}
+
+static int pt_blk_sync_reset(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ pt_blk_reset(decoder);
+
+ return 0;
+}
+
+int pt_blk_sync_forward(struct pt_block_decoder *decoder)
+{
+ int errcode, status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ errcode = pt_blk_sync_reset(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_qry_sync_forward(&decoder->query, &decoder->ip);
+
+ return pt_blk_start(decoder, status);
+}
+
+int pt_blk_sync_backward(struct pt_block_decoder *decoder)
+{
+ int errcode, status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ errcode = pt_blk_sync_reset(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_qry_sync_backward(&decoder->query, &decoder->ip);
+
+ return pt_blk_start(decoder, status);
+}
+
+int pt_blk_sync_set(struct pt_block_decoder *decoder, uint64_t offset)
+{
+ int errcode, status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ errcode = pt_blk_sync_reset(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset);
+
+ return pt_blk_start(decoder, status);
+}
+
+int pt_blk_get_offset(const struct pt_block_decoder *decoder, uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_offset(&decoder->query, offset);
+}
+
+int pt_blk_get_sync_offset(const struct pt_block_decoder *decoder,
+ uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_sync_offset(&decoder->query, offset);
+}
+
+struct pt_image *pt_blk_get_image(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return decoder->image;
+}
+
+int pt_blk_set_image(struct pt_block_decoder *decoder, struct pt_image *image)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ if (!image)
+ image = &decoder->default_image;
+
+ decoder->image = image;
+ return 0;
+}
+
+const struct pt_config *
+pt_blk_get_config(const struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return pt_qry_get_config(&decoder->query);
+}
+
+int pt_blk_time(struct pt_block_decoder *decoder, uint64_t *time,
+ uint32_t *lost_mtc, uint32_t *lost_cyc)
+{
+ if (!decoder || !time)
+ return -pte_invalid;
+
+ return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc);
+}
+
+int pt_blk_core_bus_ratio(struct pt_block_decoder *decoder, uint32_t *cbr)
+{
+ if (!decoder || !cbr)
+ return -pte_invalid;
+
+ return pt_qry_core_bus_ratio(&decoder->query, cbr);
+}
+
+int pt_blk_asid(const struct pt_block_decoder *decoder, struct pt_asid *asid,
+ size_t size)
+{
+ if (!decoder || !asid)
+ return -pte_invalid;
+
+ return pt_asid_to_user(asid, &decoder->asid, size);
+}
+
+/* Fetch the next pending event.
+ *
+ * Checks for pending events. If an event is pending, fetches it (if not
+ * already in process).
+ *
+ * Returns zero if no event is pending.
+ * Returns a positive integer if an event is pending or in process.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_blk_fetch_event(struct pt_block_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ if (decoder->process_event)
+ return 1;
+
+ if (!(decoder->status & pts_event_pending))
+ return 0;
+
+ status = pt_qry_event(&decoder->query, &decoder->event,
+ sizeof(decoder->event));
+ if (status < 0)
+ return status;
+
+ decoder->process_event = 1;
+ decoder->status = status;
+
+ return 1;
+}
+
+static inline int pt_blk_block_is_empty(const struct pt_block *block)
+{
+ if (!block)
+ return 1;
+
+ return !block->ninsn;
+}
+
+static inline int block_to_user(struct pt_block *ublock, size_t size,
+ const struct pt_block *block)
+{
+ if (!ublock || !block)
+ return -pte_internal;
+
+ if (ublock == block)
+ return 0;
+
+	/* Zero out any unknown bytes.
+	 *
+	 * Note the byte-granular cast; without it, the offset would be
+	 * scaled by sizeof(*ublock).
+	 */
+	if (sizeof(*block) < size) {
+		memset((uint8_t *) ublock + sizeof(*block), 0,
+		       size - sizeof(*block));
+
+ size = sizeof(*block);
+ }
+
+ memcpy(ublock, block, size);
+
+ return 0;
+}
+
+static int pt_insn_false(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) insn;
+ (void) iext;
+
+ return 0;
+}
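+
+/* Note that pt_insn_false() serves as an always-false predicate for
+ * pt_blk_proceed_to_insn() when we simply want to proceed as far as we can
+ * without trace; see pt_blk_proceed_no_event_uncached() below.
+ */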
+
+/* Determine the next IP using trace.
+ *
+ * Tries to determine the IP of the next instruction using trace and provides it
+ * in @pip.
+ *
+ * Not requiring trace to determine the IP is treated as an internal error.
+ *
+ * Does not update the return compression stack for indirect calls. This is
+ * expected to have been done, already, when trying to determine the next IP
+ * without using trace.
+ *
+ * Does not update @decoder->status. The caller is expected to do that.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ * Returns -pte_internal if @pip, @decoder, @insn, or @iext are NULL.
+ * Returns -pte_internal if no trace is required.
+ */
+static int pt_blk_next_ip(uint64_t *pip, struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ int status, errcode;
+
+ if (!pip || !decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* We handle non-taken conditional branches, and compressed returns
+ * directly in the switch.
+ *
+ * All kinds of branches are handled below the switch.
+ */
+ switch (insn->iclass) {
+ case ptic_cond_jump: {
+ uint64_t ip;
+ int taken;
+
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0)
+ return status;
+
+ ip = insn->ip + insn->size;
+ if (taken)
+ ip += iext->variant.branch.displacement;
+
+ *pip = ip;
+ return status;
+ }
+
+ case ptic_return: {
+ int taken;
+
+ /* Check for a compressed return. */
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ break;
+ }
+
+ /* A compressed return is indicated by a taken conditional
+ * branch.
+ */
+ if (!taken)
+ return -pte_bad_retcomp;
+
+ errcode = pt_retstack_pop(&decoder->retstack, pip);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+ }
+
+ case ptic_jump:
+ case ptic_call:
+ /* A direct jump or call wouldn't require trace. */
+ if (iext->variant.branch.is_direct)
+ return -pte_internal;
+
+ break;
+
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ break;
+
+ case ptic_ptwrite:
+ case ptic_other:
+ return -pte_internal;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+
+ /* Process an indirect branch.
+ *
+ * This covers indirect jumps and calls, non-compressed returns, and all
+ * flavors of far transfers.
+ */
+ return pt_blk_indirect_branch(decoder, pip);
+}
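+
+/* Compressed returns, sketched: for a sequence like
+ *
+ *	call f		; pushes f's return address onto decoder->retstack
+ *	...
+ *	ret		; compressed: a taken bit in a TNT packet
+ *
+ * the decoder pops the return address from its software return stack instead
+ * of consuming an indirect-branch (TIP) packet.  See pt_blk_log_call() below
+ * for the push side.
+ */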
+
+/* Proceed to the next IP using trace.
+ *
+ * We failed to proceed without trace. This ends the current block. Now use
+ * trace to do one final step to determine the start IP of the next block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_with_trace(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_blk_next_ip(&decoder->ip, decoder, insn, iext);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates upcoming
+ * events.
+ */
+ decoder->status = status;
+
+ /* We do need an IP in order to proceed. */
+ if (status & pts_ip_suppressed)
+ return -pte_noip;
+
+ return 0;
+}
+
+/* Decode one instruction in a known section.
+ *
+ * Decode the instruction at @insn->ip in @msec assuming execution mode
+ * @insn->mode.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_decode_in_section(struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ const struct pt_mapped_section *msec)
+{
+ int status;
+
+ if (!insn || !iext)
+ return -pte_internal;
+
+ /* We know that @ip is contained in @section.
+ *
+ * Note that we need to translate @ip into a section offset.
+ */
+ status = pt_msec_read(msec, insn->raw, sizeof(insn->raw), insn->ip);
+ if (status < 0)
+ return status;
+
+ /* We initialize @insn->size to the maximal possible size. It will be
+ * set to the actual size during instruction decode.
+ */
+ insn->size = (uint8_t) status;
+
+ return pt_ild_decode(insn, iext);
+}
+
+/* Update the return-address stack if @insn is a near call.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static inline int pt_blk_log_call(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ if (insn->iclass != ptic_call)
+ return 0;
+
+ /* Ignore direct calls to the next instruction that are used for
+ * position independent code.
+ */
+ if (iext->variant.branch.is_direct &&
+ !iext->variant.branch.displacement)
+ return 0;
+
+ return pt_retstack_push(&decoder->retstack, insn->ip + insn->size);
+}
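+
+/* The filtered idiom above is the classic position-independent get-IP
+ * sequence, e.g. in 32-bit code:
+ *
+ *	call 1f
+ * 1:	pop %eax	; %eax now holds the current IP
+ *
+ * The call's return address is consumed by the pop, so pushing it would only
+ * pollute the return stack.
+ */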
+
+/* Proceed by one instruction.
+ *
+ * Tries to decode the instruction at @decoder->ip and, on success, adds it to
+ * @block and provides it in @pinsn and @piext.
+ *
+ * The instruction will not be added if:
+ *
+ * - the memory could not be read: return error
+ * - it could not be decoded: return error
+ * - @block is already full: return zero
+ * - @block would switch sections: return zero
+ *
+ * Returns a positive integer if the instruction was added.
+ * Returns zero if the instruction didn't fit into @block.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_one_insn(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *pinsn,
+ struct pt_insn_ext *piext)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ uint16_t ninsn;
+ int status;
+
+ if (!decoder || !block || !pinsn || !piext)
+ return -pte_internal;
+
+ /* There's nothing to do if there is no room in @block. */
+ ninsn = block->ninsn + 1;
+ if (!ninsn)
+ return 0;
+
+ /* The truncated instruction must be last. */
+ if (block->truncated)
+ return 0;
+
+ memset(&insn, 0, sizeof(insn));
+ memset(&iext, 0, sizeof(iext));
+
+ insn.mode = decoder->mode;
+ insn.ip = decoder->ip;
+
+ status = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (status < 0)
+ return status;
+
+ /* We do not switch sections inside a block. */
+ if (insn.isid != block->isid) {
+ if (!pt_blk_block_is_empty(block))
+ return 0;
+
+ block->isid = insn.isid;
+ }
+
+ /* If we couldn't read @insn's memory in one chunk from @insn.isid, we
+ * provide the memory in @block.
+ */
+ if (insn.truncated) {
+ memcpy(block->raw, insn.raw, insn.size);
+ block->size = insn.size;
+ block->truncated = 1;
+ }
+
+ /* Log calls' return addresses for return compression. */
+ status = pt_blk_log_call(decoder, &insn, &iext);
+ if (status < 0)
+ return status;
+
+ /* We have a new instruction. */
+ block->iclass = insn.iclass;
+ block->end_ip = insn.ip;
+ block->ninsn = ninsn;
+
+ *pinsn = insn;
+ *piext = iext;
+
+ return 1;
+}
+
+
+/* Proceed to a particular type of instruction without using trace.
+ *
+ * Proceed until we reach an instruction for which @predicate returns a positive
+ * integer or until:
+ *
+ * - @predicate returns an error: return error
+ * - @block is full: return zero
+ * - @block would switch sections: return zero
+ * - we would need trace: return -pte_bad_query
+ *
+ * Provide the last instruction that was reached in @insn and @iext.
+ *
+ * Update @decoder->ip to point to the last IP that was reached. If we fail due
+ * to lack of trace or if we reach a desired instruction, this is @insn->ip;
+ * otherwise this is the next instruction's IP.
+ *
+ * Returns a positive integer if a suitable instruction was reached.
+ * Returns zero if no such instruction was reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_insn(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ int (*predicate)(const struct pt_insn *,
+ const struct pt_insn_ext *))
+{
+ int status;
+
+ if (!decoder || !insn || !predicate)
+ return -pte_internal;
+
+ for (;;) {
+ status = pt_blk_proceed_one_insn(decoder, block, insn, iext);
+ if (status <= 0)
+ return status;
+
+ /* We're done if this instruction matches the spec (positive
+ * status) or we run into an error (negative status).
+ */
+ status = predicate(insn, iext);
+ if (status != 0)
+ return status;
+
+ /* Let's see if we can proceed to the next IP without trace. */
+ status = pt_insn_next_ip(&decoder->ip, insn, iext);
+ if (status < 0)
+ return status;
+
+ /* End the block if the user asked us to.
+ *
+		 * We only need to take care of direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn->iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn->iclass == ptic_jump)))
+ return 0;
+ }
+}
+
+/* Proceed to a particular IP without using trace.
+ *
+ * Proceed until we reach @ip or until:
+ *
+ * - @block is full: return zero
+ * - @block would switch sections: return zero
+ * - we would need trace: return -pte_bad_query
+ *
+ * Provide the last instruction that was reached in @insn and @iext. If we
+ * reached @ip, this is the instruction preceding it.
+ *
+ * Update @decoder->ip to point to the last IP that was reached. If we fail due
+ * to lack of trace, this is @insn->ip; otherwise this is the next instruction's
+ * IP.
+ *
+ * Returns a positive integer if @ip was reached.
+ * Returns zero if no such instruction was reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_ip(struct pt_block_decoder *decoder,
+ struct pt_block *block, struct pt_insn *insn,
+ struct pt_insn_ext *iext, uint64_t ip)
+{
+ int status;
+
+ if (!decoder || !insn)
+ return -pte_internal;
+
+ for (;;) {
+ /* We're done when we reach @ip. We may not even have to decode
+ * a single instruction in some cases.
+ */
+ if (decoder->ip == ip)
+ return 1;
+
+ status = pt_blk_proceed_one_insn(decoder, block, insn, iext);
+ if (status <= 0)
+ return status;
+
+ /* Let's see if we can proceed to the next IP without trace. */
+ status = pt_insn_next_ip(&decoder->ip, insn, iext);
+ if (status < 0)
+ return status;
+
+ /* End the block if the user asked us to.
+ *
+		 * We only need to take care of direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ *
+ * The call at the end of the block may have reached @ip; make
+ * sure to indicate that.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn->iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn->iclass == ptic_jump))) {
+ return (decoder->ip == ip ? 1 : 0);
+ }
+ }
+}
+
+/* Proceed to a particular IP with trace, if necessary.
+ *
+ * Proceed until we reach @ip or until:
+ *
+ * - @block is full: return zero
+ * - @block would switch sections: return zero
+ * - we need trace: return zero
+ *
+ * Update @decoder->ip to point to the last IP that was reached.
+ *
+ * A return of zero ends @block.
+ *
+ * Returns a positive integer if @ip was reached.
+ * Returns zero if no such instruction was reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_ip_with_trace(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ uint64_t ip)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int status;
+
+ /* Try to reach @ip without trace.
+ *
+ * We're also OK if @block overflowed or we switched sections and we
+ * have to try again in the next iteration.
+ */
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext, ip);
+ if (status != -pte_bad_query)
+ return status;
+
+ /* Needing trace is not an error. We use trace to determine the next
+ * start IP and end the block.
+ */
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+}
+
+static int pt_insn_skl014(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!insn || !iext)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_call:
+ case ptic_jump:
+ return iext->variant.branch.is_direct;
+
+ case ptic_other:
+ return pt_insn_changes_cr3(insn, iext);
+ }
+}
+
+/* Proceed to the location of a synchronous disabled event with suppressed IP
+ * considering SKL014.
+ *
+ * We have a (synchronous) disabled event pending. Proceed to the event
+ * location and indicate whether we were able to reach it.
+ *
+ * With SKL014 a TIP.PGD with suppressed IP may also be generated by a direct
+ * unconditional branch that clears FilterEn by jumping out of a filter region
+ * or into a TraceStop region. Use the filter configuration to determine the
+ * exact branch the event binds to.
+ *
+ * The last instruction that was reached is stored in @insn/@iext.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_skl014(struct pt_block_decoder *decoder,
+ struct pt_block *block, struct pt_insn *insn,
+ struct pt_insn_ext *iext)
+{
+ const struct pt_conf_addr_filter *addr_filter;
+ int status;
+
+ if (!decoder || !block || !insn || !iext)
+ return -pte_internal;
+
+ addr_filter = &decoder->query.config.addr_filter;
+ for (;;) {
+ uint64_t ip;
+
+ status = pt_blk_proceed_to_insn(decoder, block, insn, iext,
+ pt_insn_skl014);
+ if (status <= 0)
+ break;
+
+ /* The erratum doesn't apply if we can bind the event to a
+ * CR3-changing instruction.
+ */
+ if (pt_insn_changes_cr3(insn, iext))
+ break;
+
+ /* Check the filter against the branch target. */
+ status = pt_insn_next_ip(&ip, insn, iext);
+ if (status < 0)
+ break;
+
+ status = pt_filter_addr_check(addr_filter, ip);
+ if (status <= 0) {
+ /* We need to flip the indication.
+ *
+ * We reached the event location when @ip lies inside a
+ * tracing-disabled region.
+ */
+ if (!status)
+ status = 1;
+
+ break;
+ }
+
+ /* This is not the correct instruction. Proceed past it and try
+ * again.
+ */
+ decoder->ip = ip;
+
+ /* End the block if the user asked us to.
+ *
+		 * We only need to take care of direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn->iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn->iclass == ptic_jump)))
+ break;
+ }
+
+ return status;
+}
+
+/* Proceed to the event location for a disabled event.
+ *
+ * We have a (synchronous) disabled event pending. Proceed to the event
+ * location and indicate whether we were able to reach it.
+ *
+ * The last instruction that was reached is stored in @insn/@iext.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_disabled(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ const struct pt_event *ev)
+{
+ if (!decoder || !block || !ev)
+ return -pte_internal;
+
+ if (ev->ip_suppressed) {
+ /* Due to SKL014 the TIP.PGD payload may be suppressed also for
+ * direct branches.
+ *
+ * If we don't have a filter configuration we assume that no
+ * address filters were used and the erratum does not apply.
+ *
+ * We might otherwise disable tracing too early.
+ */
+ if (decoder->query.config.addr_filter.config.addr_cfg &&
+ decoder->query.config.errata.skl014)
+ return pt_blk_proceed_skl014(decoder, block, insn,
+ iext);
+
+ /* A synchronous disabled event also binds to far branches and
+ * CPL-changing instructions. Both would require trace,
+ * however, and are thus implicitly handled by erroring out.
+ *
+ * The would-require-trace error is handled by our caller.
+ */
+ return pt_blk_proceed_to_insn(decoder, block, insn, iext,
+ pt_insn_changes_cr3);
+ } else
+ return pt_blk_proceed_to_ip(decoder, block, insn, iext,
+ ev->variant.disabled.ip);
+}
+
+/* Set the expected resume address for a synchronous disable.
+ *
+ * On a synchronous disable, @decoder->ip still points to the instruction to
+ * which the event bound. That's not where we expect tracing to resume.
+ *
+ * For calls, a fair assumption is that tracing resumes after returning from the
+ * called function. For other types of instructions, we simply don't know.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_set_disable_resume_ip(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn)
+{
+ if (!decoder || !insn)
+ return -pte_internal;
+
+ switch (insn->iclass) {
+ case ptic_call:
+ case ptic_far_call:
+ decoder->ip = insn->ip + insn->size;
+ break;
+
+ default:
+ decoder->ip = 0ull;
+ break;
+ }
+
+ return 0;
+}
+
+/* Proceed to the event location for an async paging event.
+ *
+ * We have an async paging event pending. Proceed to the event location and
+ * indicate whether we were able to reach it. Needing trace in order to proceed
+ * is not an error in this case but ends the block.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_async_paging(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Apply the event immediately if we don't have an IP. */
+ if (ev->ip_suppressed)
+ return 1;
+
+ status = pt_blk_proceed_to_ip_with_trace(decoder, block,
+ ev->variant.async_paging.ip);
+ if (status < 0)
+ return status;
+
+ /* We may have reached the IP. */
+ return (decoder->ip == ev->variant.async_paging.ip ? 1 : 0);
+}
+
+/* Proceed to the event location for an async vmcs event.
+ *
+ * We have an async vmcs event pending. Proceed to the event location and
+ * indicate whether we were able to reach it. Needing trace in order to proceed
+ * is not an error in this case but ends the block.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_async_vmcs(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Apply the event immediately if we don't have an IP. */
+ if (ev->ip_suppressed)
+ return 1;
+
+ status = pt_blk_proceed_to_ip_with_trace(decoder, block,
+ ev->variant.async_vmcs.ip);
+ if (status < 0)
+ return status;
+
+ /* We may have reached the IP. */
+ return (decoder->ip == ev->variant.async_vmcs.ip ? 1 : 0);
+}
+
+/* Proceed to the event location for an exec mode event.
+ *
+ * We have an exec mode event pending. Proceed to the event location and
+ * indicate whether we were able to reach it. Needing trace in order to proceed
+ * is not an error in this case but ends the block.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_exec_mode(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Apply the event immediately if we don't have an IP. */
+ if (ev->ip_suppressed)
+ return 1;
+
+ status = pt_blk_proceed_to_ip_with_trace(decoder, block,
+ ev->variant.exec_mode.ip);
+ if (status < 0)
+ return status;
+
+ /* We may have reached the IP. */
+ return (decoder->ip == ev->variant.exec_mode.ip ? 1 : 0);
+}
+
+/* Proceed to the event location for a ptwrite event.
+ *
+ * We have a ptwrite event pending. Proceed to the event location and indicate
+ * whether we were able to reach it.
+ *
+ * In case of the event binding to a ptwrite instruction, we pass beyond that
+ * instruction and update the event to provide the instruction's IP.
+ *
+ * In the case of the event binding to an IP provided in the event, we move
+ * beyond the instruction at that IP.
+ *
+ * Returns a positive integer if the event location was reached.
+ * Returns zero if the event location was not reached.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_proceed_to_ptwrite(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_insn *insn,
+ struct pt_insn_ext *iext,
+ struct pt_event *ev)
+{
+ int status;
+
+ if (!insn || !ev)
+ return -pte_internal;
+
+ /* If we don't have an IP, the event binds to the next PTWRITE
+ * instruction.
+ *
+ * If we have an IP it still binds to the next PTWRITE instruction but
+ * now the IP tells us where that instruction is. This makes most sense
+ * when tracing is disabled and we don't have any other means of finding
+ * the PTWRITE instruction. We nevertheless distinguish the two cases
+ * here.
+ *
+ * In both cases, we move beyond the PTWRITE instruction, so it will be
+ * the last instruction in the current block and @decoder->ip will point
+ * to the instruction following it.
+ */
+ if (ev->ip_suppressed) {
+ status = pt_blk_proceed_to_insn(decoder, block, insn, iext,
+ pt_insn_is_ptwrite);
+ if (status <= 0)
+ return status;
+
+ /* We now know the IP of the PTWRITE instruction corresponding
+ * to this event. Fill it in to make it more convenient for the
+ * user to process the event.
+ */
+ ev->variant.ptwrite.ip = insn->ip;
+ ev->ip_suppressed = 0;
+ } else {
+ status = pt_blk_proceed_to_ip(decoder, block, insn, iext,
+ ev->variant.ptwrite.ip);
+ if (status <= 0)
+ return status;
+
+ /* We reached the PTWRITE instruction and @decoder->ip points to
+ * it; @insn/@iext still contain the preceding instruction.
+ *
+ * Proceed beyond the PTWRITE to account for it. Note that we
+ * may still overflow the block, which would cause us to
+ * postpone both instruction and event to the next block.
+ */
+ status = pt_blk_proceed_one_insn(decoder, block, insn, iext);
+ if (status <= 0)
+ return status;
+ }
+
+ return 1;
+}
+
+/* Try to work around erratum SKD022.
+ *
+ * If we get an asynchronous disable on VMLAUNCH or VMRESUME, the FUP that
+ * caused the disable to be asynchronous might have been bogus.
+ *
+ * Returns a positive integer if the erratum has been handled.
+ * Returns zero if the erratum does not apply.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_handle_erratum_skd022(struct pt_block_decoder *decoder,
+ struct pt_event *ev)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ insn.mode = decoder->mode;
+ insn.ip = ev->variant.async_disabled.at;
+
+ errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (errcode < 0)
+ return 0;
+
+ switch (iext.iclass) {
+ default:
+ /* The erratum does not apply. */
+ return 0;
+
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ /* The erratum may apply. We can't be sure without a lot more
+ * analysis. Let's assume it does.
+ *
+ * We turn the async disable into a sync disable. Our caller
+ * will restart event processing.
+ */
+ ev->type = ptev_disabled;
+ ev->variant.disabled.ip = ev->variant.async_disabled.ip;
+
+ return 1;
+ }
+}
+
+/* Postpone proceeding past @insn/@iext and indicate a pending event.
+ *
+ * There may be further events pending on @insn/@iext. Postpone proceeding past
+ * @insn/@iext until we processed all events that bind to it.
+ *
+ * Returns a non-negative pt_status_flag bit-vector indicating a pending event
+ * on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_postpone_insn(struct pt_block_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* Only one can be active. */
+ if (decoder->process_insn)
+ return -pte_internal;
+
+ decoder->process_insn = 1;
+ decoder->insn = *insn;
+ decoder->iext = *iext;
+
+ return pt_blk_status(decoder, pts_event_pending);
+}
+
+/* Remove any postponed instruction from @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_clear_postponed_insn(struct pt_block_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ return 0;
+}
+
+/* Proceed past a postponed instruction.
+ *
+ * If an instruction has been postponed in @decoder, proceed past it.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_blk_proceed_postponed_insn(struct pt_block_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* There's nothing to do if we have no postponed instruction. */
+ if (!decoder->process_insn)
+ return 0;
+
+ /* There's nothing to do if tracing got disabled. */
+ if (!decoder->enabled)
+ return pt_blk_clear_postponed_insn(decoder);
+
+ status = pt_insn_next_ip(&decoder->ip, &decoder->insn, &decoder->iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ status = pt_blk_proceed_with_trace(decoder, &decoder->insn,
+ &decoder->iext);
+ if (status < 0)
+ return status;
+ }
+
+ return pt_blk_clear_postponed_insn(decoder);
+}
+
+/* Proceed to the next event.
+ *
+ * We have an event pending. Proceed to the event location and indicate the
+ * event to the user.
+ *
+ * On our way to the event location we may also be forced to postpone the event
+ * to the next block, e.g. if we overflow the number of instructions in the
+ * block or if we need trace in order to reach the event location.
+ *
+ * If we're not able to reach the event location, we return zero. This is what
+ * pt_blk_status() would return since:
+ *
+ * - we suppress pts_eos as long as we're processing events
+ * - we do not set pts_ip_suppressed since tracing must be enabled
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_blk_proceed_event(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder || !decoder->process_event || !block)
+ return -pte_internal;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_enabled:
+ break;
+
+ case ptev_disabled:
+ status = pt_blk_proceed_to_disabled(decoder, block, &insn,
+ &iext, ev);
+ if (status <= 0) {
+ /* A synchronous disable event also binds to the next
+ * indirect or conditional branch, i.e. to any branch
+ * that would have required trace.
+ */
+ if (status != -pte_bad_query)
+ return status;
+
+ status = pt_blk_set_disable_resume_ip(decoder, &insn);
+ if (status < 0)
+ return status;
+ }
+
+ break;
+
+ case ptev_async_disabled:
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.async_disabled.at);
+ if (status <= 0)
+ return status;
+
+ if (decoder->query.config.errata.skd022) {
+ status = pt_blk_handle_erratum_skd022(decoder, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ /* If the erratum hits, we modify the event.
+ * Try again.
+ */
+ return pt_blk_proceed_event(decoder, block);
+ }
+ }
+
+ break;
+
+ case ptev_async_branch:
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.async_branch.from);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_paging:
+ if (!decoder->enabled)
+ break;
+
+ status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ pt_insn_binds_to_pip);
+ if (status <= 0)
+ return status;
+
+ /* We bound a paging event. Make sure we do not bind further
+ * paging events to this instruction.
+ */
+ decoder->bound_paging = 1;
+
+ return pt_blk_postpone_insn(decoder, &insn, &iext);
+
+ case ptev_async_paging:
+ status = pt_blk_proceed_to_async_paging(decoder, block, ev);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_vmcs:
+ if (!decoder->enabled)
+ break;
+
+ status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ pt_insn_binds_to_vmcs);
+ if (status <= 0)
+ return status;
+
+ /* We bound a vmcs event. Make sure we do not bind further vmcs
+ * events to this instruction.
+ */
+ decoder->bound_vmcs = 1;
+
+ return pt_blk_postpone_insn(decoder, &insn, &iext);
+
+ case ptev_async_vmcs:
+ status = pt_blk_proceed_to_async_vmcs(decoder, block, ev);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_overflow:
+ break;
+
+ case ptev_exec_mode:
+ status = pt_blk_proceed_to_exec_mode(decoder, block, ev);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_tsx:
+ if (ev->ip_suppressed)
+ break;
+
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.tsx.ip);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_stop:
+ break;
+
+ case ptev_exstop:
+ if (!decoder->enabled || ev->ip_suppressed)
+ break;
+
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.exstop.ip);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_mwait:
+ if (!decoder->enabled || ev->ip_suppressed)
+ break;
+
+ status = pt_blk_proceed_to_ip(decoder, block, &insn, &iext,
+ ev->variant.mwait.ip);
+ if (status <= 0)
+ return status;
+
+ break;
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ break;
+
+ case ptev_ptwrite:
+ if (!decoder->enabled)
+ break;
+
+ status = pt_blk_proceed_to_ptwrite(decoder, block, &insn,
+ &iext, ev);
+ if (status <= 0)
+ return status;
+
+ /* We bound a ptwrite event. Make sure we do not bind further
+ * ptwrite events to this instruction.
+ */
+ decoder->bound_ptwrite = 1;
+
+ return pt_blk_postpone_insn(decoder, &insn, &iext);
+
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ break;
+ }
+
+ return pt_blk_status(decoder, pts_event_pending);
+}
+
+/* Proceed to the next decision point without using the block cache.
+ *
+ * Tracing is enabled and we don't have an event pending. Proceed as far as
+ * we get without trace. Stop when we either:
+ *
+ * - need trace in order to continue
+ * - overflow the max number of instructions in a block
+ *
+ * We actually proceed one instruction further to get the start IP for the next
+ * block. This only updates @decoder's internal state, though.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_no_event_uncached(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int status;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ /* This is overly conservative, really. We shouldn't get a bad-query
+ * status unless we decoded at least one instruction successfully.
+ */
+ memset(&insn, 0, sizeof(insn));
+ memset(&iext, 0, sizeof(iext));
+
+ /* Proceed as far as we get without trace. */
+ status = pt_blk_proceed_to_insn(decoder, block, &insn, &iext,
+ pt_insn_false);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ return 0;
+}
+
+/* Check if @ip is contained in the mapped section @msec.
+ *
+ * Returns non-zero if it is.
+ * Returns zero if it isn't or if @msec is NULL.
+ */
+static inline int pt_blk_is_in_section(const struct pt_mapped_section *msec,
+ uint64_t ip)
+{
+ uint64_t begin, end;
+
+ begin = pt_msec_begin(msec);
+ end = pt_msec_end(msec);
+
+ return (begin <= ip && ip < end);
+}
+
+/* Insert a trampoline block cache entry.
+ *
+ * Add a trampoline block cache entry at @ip to continue at @nip, where @nip
+ * must be the next instruction after @ip.
+ *
+ * Both @ip and @nip must be section-relative offsets.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static inline int pt_blk_add_trampoline(struct pt_block_cache *bcache,
+ uint64_t ip, uint64_t nip,
+ enum pt_exec_mode mode)
+{
+ struct pt_bcache_entry bce;
+ int64_t disp;
+
+ /* The displacement from @ip to @nip for the trampoline. */
+ disp = (int64_t) (nip - ip);
+
+ memset(&bce, 0, sizeof(bce));
+ bce.displacement = (int32_t) disp;
+ bce.ninsn = 1;
+ bce.mode = mode;
+ bce.qualifier = ptbq_again;
+
+ /* If we can't reach @nip without overflowing the displacement field, we
+ * have to stop and re-decode the instruction at @ip.
+ */
+ if ((int64_t) bce.displacement != disp) {
+
+ memset(&bce, 0, sizeof(bce));
+ bce.ninsn = 1;
+ bce.mode = mode;
+ bce.qualifier = ptbq_decode;
+ }
+
+ return pt_bcache_add(bcache, ip, bce);
+}
+
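+/* A minimal sketch (illustration only, not part of the import): the
+ * store-and-compare idiom used above. Assigning the displacement to the
+ * narrower block cache field and reading it back detects values that do not
+ * fit; pt_blk_add_trampoline() then falls back to a ptbq_decode entry.
+ */
+static inline int pt_blk_disp_fits_sketch(int64_t disp)
+{
+	struct pt_bcache_entry bce;
+
+	memset(&bce, 0, sizeof(bce));
+	bce.displacement = (int32_t) disp;
+
+	return ((int64_t) bce.displacement == disp);
+}
+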
+/* Insert a decode block cache entry.
+ *
+ * Add a decode block cache entry at @ioff.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static inline int pt_blk_add_decode(struct pt_block_cache *bcache,
+ uint64_t ioff, enum pt_exec_mode mode)
+{
+ struct pt_bcache_entry bce;
+
+ memset(&bce, 0, sizeof(bce));
+ bce.ninsn = 1;
+ bce.mode = mode;
+ bce.qualifier = ptbq_decode;
+
+ return pt_bcache_add(bcache, ioff, bce);
+}
+
+enum {
+ /* The maximum number of steps when filling the block cache. */
+ bcache_fill_steps = 0x400
+};
+
+/* Proceed to the next instruction and fill the block cache for @decoder->ip.
+ *
+ * Tracing is enabled and we don't have an event pending. The current IP is not
+ * yet cached.
+ *
+ * Proceed one instruction without using the block cache, then try to proceed
+ * further using the block cache.
+ *
+ * On our way back, add a block cache entry for the IP before proceeding. Note
+ * that the recursion is bounded by @steps and ultimately by the maximum number
+ * of instructions in a block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int
+pt_blk_proceed_no_event_fill_cache(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_block_cache *bcache,
+ const struct pt_mapped_section *msec,
+ size_t steps)
+{
+ struct pt_bcache_entry bce;
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ uint64_t nip, dip;
+ int64_t disp, ioff, noff;
+ int status;
+
+ if (!decoder || !steps)
+ return -pte_internal;
+
+ /* Proceed one instruction by decoding and examining it.
+ *
+ * Note that we also return on a status of zero that indicates that the
+ * instruction didn't fit into @block.
+ */
+ status = pt_blk_proceed_one_insn(decoder, block, &insn, &iext);
+ if (status <= 0)
+ return status;
+
+ ioff = pt_msec_unmap(msec, insn.ip);
+
+ /* Let's see if we can proceed to the next IP without trace.
+ *
+ * If we can't, this is certainly a decision point.
+ */
+ status = pt_insn_next_ip(&decoder->ip, &insn, &iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ memset(&bce, 0, sizeof(bce));
+ bce.ninsn = 1;
+ bce.mode = insn.mode;
+ bce.isize = insn.size;
+
+ /* Clear the instruction size in case of overflows. */
+ if ((uint8_t) bce.isize != insn.size)
+ bce.isize = 0;
+
+ switch (insn.iclass) {
+ case ptic_ptwrite:
+ case ptic_error:
+ case ptic_other:
+ return -pte_internal;
+
+ case ptic_jump:
+ /* A direct jump doesn't require trace. */
+ if (iext.variant.branch.is_direct)
+ return -pte_internal;
+
+ bce.qualifier = ptbq_indirect;
+ break;
+
+ case ptic_call:
+ /* A direct call doesn't require trace. */
+ if (iext.variant.branch.is_direct)
+ return -pte_internal;
+
+ bce.qualifier = ptbq_ind_call;
+ break;
+
+ case ptic_return:
+ bce.qualifier = ptbq_return;
+ break;
+
+ case ptic_cond_jump:
+ bce.qualifier = ptbq_cond;
+ break;
+
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ bce.qualifier = ptbq_indirect;
+ break;
+ }
+
+ /* If the block was truncated, we have to decode its last
+ * instruction each time.
+ *
+ * We could have skipped the above switch and size assignment in
+ * this case but this is already a slow and hopefully infrequent
+ * path.
+ */
+ if (block->truncated)
+ bce.qualifier = ptbq_decode;
+
+ status = pt_bcache_add(bcache, ioff, bce);
+ if (status < 0)
+ return status;
+
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ /* The next instruction's IP. */
+ nip = decoder->ip;
+ noff = pt_msec_unmap(msec, nip);
+
+ /* Even if we were able to proceed without trace, we might have to stop
+ * here for various reasons:
+ *
+ * - at near direct calls to update the return-address stack
+ *
+ * We are forced to re-decode @insn to get the branch displacement.
+ *
+ * Even though it is constant, we don't cache it to avoid increasing
+ * the size of a cache entry. Note that the displacement field is
+ * zero for this entry and we might be tempted to use it - but other
+ * entries that point to this decision point will have non-zero
+ * displacement.
+ *
+	 *     We could proceed after a near direct call but we might as well
+ * postpone it to the next iteration. Make sure to end the block if
+ * @decoder->flags.variant.block.end_on_call is set, though.
+ *
+ * - at near direct backwards jumps to detect section splits
+ *
+ * In case the current section is split underneath us, we must take
+ * care to detect that split.
+ *
+ * There is one corner case where the split is in the middle of a
+ * linear sequence of instructions that branches back into the
+ * originating section.
+ *
+ * Calls, indirect branches, and far branches are already covered
+ * since they either require trace or already require us to stop
+ * (i.e. near direct calls) for other reasons. That leaves near
+ * direct backward jumps.
+ *
+	 *     Instead of the decode stop at the jump instruction that we use
+	 *     here, we could have made sure that other block cache entries
+	 *     extending this one insert a trampoline to the jump's entry.
+	 *     That would have been a bit more complicated.
+ *
+ * - if we switched sections
+ *
+ * This ends a block just like a branch that requires trace.
+ *
+ * We need to re-decode @insn in order to determine the start IP of
+ * the next block.
+ *
+ * - if the block is truncated
+ *
+ * We need to read the last instruction's memory from multiple
+ * sections and provide it to the user.
+ *
+ * We could still use the block cache but then we'd have to handle
+ * this case for each qualifier. Truncation is hopefully rare and
+ * having to read the memory for the instruction from multiple
+ * sections is already slow. Let's rather keep things simple and
+ * route it through the decode flow, where we already have
+ * everything in place.
+ */
+ switch (insn.iclass) {
+ case ptic_call:
+ return pt_blk_add_decode(bcache, ioff, insn.mode);
+
+ case ptic_jump:
+ /* An indirect branch requires trace and should have been
+ * handled above.
+ */
+ if (!iext.variant.branch.is_direct)
+ return -pte_internal;
+
+ if (iext.variant.branch.displacement < 0 ||
+ decoder->flags.variant.block.end_on_jump)
+ return pt_blk_add_decode(bcache, ioff, insn.mode);
+
+ fallthrough;
+ default:
+ if (!pt_blk_is_in_section(msec, nip) || block->truncated)
+ return pt_blk_add_decode(bcache, ioff, insn.mode);
+
+ break;
+ }
+
+ /* We proceeded one instruction. Let's see if we have a cache entry for
+ * the next instruction.
+ */
+ status = pt_bcache_lookup(&bce, bcache, noff);
+ if (status < 0)
+ return status;
+
+	/* If we don't have a valid cache entry yet, fill the cache some more.
+ *
+ * On our way back, we add a cache entry for this instruction based on
+ * the cache entry of the succeeding instruction.
+ */
+ if (!pt_bce_is_valid(bce)) {
+ /* If we exceeded the maximum number of allowed steps, we insert
+ * a trampoline to the next instruction.
+ *
+ * The next time we encounter the same code, we will use the
+ * trampoline to jump directly to where we left off this time
+ * and continue from there.
+ */
+ steps -= 1;
+ if (!steps)
+ return pt_blk_add_trampoline(bcache, ioff, noff,
+ insn.mode);
+
+ status = pt_blk_proceed_no_event_fill_cache(decoder, block,
+ bcache, msec,
+ steps);
+ if (status < 0)
+ return status;
+
+ /* Let's see if we have more luck this time. */
+ status = pt_bcache_lookup(&bce, bcache, noff);
+ if (status < 0)
+ return status;
+
+ /* If we still don't have a valid cache entry, we're done. Most
+ * likely, @block overflowed and we couldn't proceed past the
+ * next instruction.
+ */
+ if (!pt_bce_is_valid(bce))
+ return 0;
+ }
+
+ /* We must not have switched execution modes.
+ *
+ * This would require an event and we're on the no-event flow.
+ */
+ if (pt_bce_exec_mode(bce) != insn.mode)
+ return -pte_internal;
+
+ /* The decision point IP and the displacement from @insn.ip. */
+ dip = nip + bce.displacement;
+ disp = (int64_t) (dip - insn.ip);
+
+ /* We may have switched sections if the section was split. See
+ * pt_blk_proceed_no_event_cached() for a more elaborate comment.
+ *
+ * We're not adding a block cache entry since this won't apply to the
+ * original section which may be shared with other decoders.
+ *
+ * We will instead take the slow path until the end of the section.
+ */
+ if (!pt_blk_is_in_section(msec, dip))
+ return 0;
+
+ /* Let's try to reach @nip's decision point from @insn.ip.
+ *
+ * There are two fields that may overflow: @bce.ninsn and
+ * @bce.displacement.
+ */
+ bce.ninsn += 1;
+ bce.displacement = (int32_t) disp;
+
+ /* If none of them overflowed, we're done.
+ *
+ * If one or both overflowed, let's try to insert a trampoline, i.e. we
+ * try to reach @dip via a ptbq_again entry to @nip.
+ */
+ if (!bce.ninsn || ((int64_t) bce.displacement != disp))
+ return pt_blk_add_trampoline(bcache, ioff, noff, insn.mode);
+
+ /* We're done. Add the cache entry.
+ *
+ * There's a chance that other decoders updated the cache entry in the
+	 * meantime. They should have come to the same conclusion as we did,
+ * though, and the cache entries should be identical.
+ *
+ * Cache updates are atomic so even if the two versions were not
+ * identical, we wouldn't care because they are both correct.
+ */
+ return pt_bcache_add(bcache, ioff, bce);
+}
+
+/* Proceed at a potentially truncated instruction.
+ *
+ * We were not able to decode the instruction at @decoder->ip in @decoder's
+ * cached section. This is typically caused by not having enough bytes.
+ *
+ * Try to decode the instruction again using the entire image. If this
+ * succeeds, we expect to end up with an instruction that was truncated in the
+ * section in which it started. We provide the full instruction in this case
+ * and end the block.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_truncated(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = decoder->mode;
+ insn.ip = decoder->ip;
+
+ errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (errcode < 0)
+ return errcode;
+
+ /* We shouldn't use this function if the instruction isn't truncated. */
+ if (!insn.truncated)
+ return -pte_internal;
+
+ /* Provide the instruction in the block. This ends the block. */
+ memcpy(block->raw, insn.raw, insn.size);
+ block->iclass = insn.iclass;
+ block->size = insn.size;
+ block->truncated = 1;
+
+ /* Log calls' return addresses for return compression. */
+ errcode = pt_blk_log_call(decoder, &insn, &iext);
+ if (errcode < 0)
+ return errcode;
+
+ /* Let's see if we can proceed to the next IP without trace.
+ *
+ * The truncated instruction ends the block but we still need to get the
+ * next block's start IP.
+ */
+ errcode = pt_insn_next_ip(&decoder->ip, &insn, &iext);
+ if (errcode < 0) {
+ if (errcode != -pte_bad_query)
+ return errcode;
+
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ return 0;
+}
+
+/* Proceed to the next decision point using the block cache.
+ *
+ * Tracing is enabled and we don't have an event pending. We already set
+ * @block's isid. All reads are done within @msec as we're not switching
+ * sections between blocks.
+ *
+ * Proceed as far as we get without trace. Stop when we either:
+ *
+ * - need trace in order to continue
+ * - overflow the max number of instructions in a block
+ *
+ * We actually proceed one instruction further to get the start IP for the next
+ * block. This only updates @decoder's internal state, though.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_no_event_cached(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ struct pt_block_cache *bcache,
+ const struct pt_mapped_section *msec)
+{
+ struct pt_bcache_entry bce;
+ uint16_t binsn, ninsn;
+ uint64_t offset, nip;
+ int status;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ offset = pt_msec_unmap(msec, decoder->ip);
+ status = pt_bcache_lookup(&bce, bcache, offset);
+ if (status < 0)
+ return status;
+
+ /* If we don't find a valid cache entry, fill the cache. */
+ if (!pt_bce_is_valid(bce))
+ return pt_blk_proceed_no_event_fill_cache(decoder, block,
+ bcache, msec,
+ bcache_fill_steps);
+
+	/* If we switched sections, the original section must have been split
+ * underneath us. A split preserves the block cache of the original
+ * section.
+ *
+ * Crossing sections requires ending the block so we can indicate the
+ * proper isid for the entire block.
+ *
+ * Plus there's the chance that the new section that caused the original
+ * section to split changed instructions.
+ *
+ * This check will also cover changes to a linear sequence of code we
+ * would otherwise have jumped over as long as the start and end are in
+ * different sub-sections.
+ *
+ * Since we stop on every (backwards) branch (through an artificial stop
+ * in the case of a near direct backward branch) we will detect all
+ * section splits.
+ *
+ * Switch to the slow path until we reach the end of this section.
+ */
+ nip = decoder->ip + bce.displacement;
+ if (!pt_blk_is_in_section(msec, nip))
+ return pt_blk_proceed_no_event_uncached(decoder, block);
+
+ /* We have a valid cache entry. Let's first check if the way to the
+ * decision point still fits into @block.
+ *
+ * If it doesn't, we end the block without filling it as much as we
+ * could since this would require us to switch to the slow path.
+ *
+ * On the next iteration, we will start with an empty block, which is
+ * guaranteed to have enough room for at least one block cache entry.
+ */
+ binsn = block->ninsn;
+ ninsn = binsn + (uint16_t) bce.ninsn;
+ if (ninsn < binsn)
+ return 0;
+
+ /* Jump ahead to the decision point and proceed from there.
+ *
+ * We're not switching execution modes so even if @block already has an
+ * execution mode, it will be the one we're going to set.
+ */
+ decoder->ip = nip;
+
+ /* We don't know the instruction class so we should be setting it to
+ * ptic_error. Since we will be able to fill it back in later in most
+ * cases, we move the clearing to the switch cases that don't.
+ */
+ block->end_ip = nip;
+ block->ninsn = ninsn;
+ block->mode = pt_bce_exec_mode(bce);
+
+ switch (pt_bce_qualifier(bce)) {
+ case ptbq_again:
+ /* We're not able to reach the actual decision point due to
+ * overflows so we inserted a trampoline.
+ *
+ * We don't know the instruction and it is not guaranteed that
+ * we will proceed further (e.g. if @block overflowed). Let's
+ * clear any previously stored instruction class which has
+ * become invalid when we updated @block->ninsn.
+ */
+ block->iclass = ptic_error;
+
+ return pt_blk_proceed_no_event_cached(decoder, block, bcache,
+ msec);
+
+ case ptbq_cond:
+ /* We're at a conditional branch. */
+ block->iclass = ptic_cond_jump;
+
+ /* Let's first check whether we know the size of the
+ * instruction. If we do, we might get away without decoding
+ * the instruction.
+ *
+ * If we don't know the size we might as well do the full decode
+ * and proceed-with-trace flow we do for ptbq_decode.
+ */
+ if (bce.isize) {
+ uint64_t ip;
+ int taken;
+
+ /* If the branch is not taken, we don't need to decode
+ * the instruction at @decoder->ip.
+ *
+ * If it is taken, we have to implement everything here.
+ * We can't use the normal decode and proceed-with-trace
+ * flow since we already consumed the TNT bit.
+ */
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+
+ ip = decoder->ip;
+ if (taken) {
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = pt_bce_exec_mode(bce);
+ insn.ip = ip;
+
+ status = pt_blk_decode_in_section(&insn, &iext,
+ msec);
+ if (status < 0)
+ return status;
+
+ ip += iext.variant.branch.displacement;
+ }
+
+ decoder->ip = ip + bce.isize;
+ break;
+ }
+
+ fallthrough;
+ case ptbq_decode: {
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ /* We need to decode the instruction at @decoder->ip and decide
+ * what to do based on that.
+ *
+ * We already accounted for the instruction so we can't just
+ * call pt_blk_proceed_one_insn().
+ */
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = pt_bce_exec_mode(bce);
+ insn.ip = decoder->ip;
+
+ status = pt_blk_decode_in_section(&insn, &iext, msec);
+ if (status < 0) {
+ if (status != -pte_bad_insn)
+ return status;
+
+ return pt_blk_proceed_truncated(decoder, block);
+ }
+
+ /* We just decoded @insn so we know the instruction class. */
+ block->iclass = insn.iclass;
+
+ /* Log calls' return addresses for return compression. */
+ status = pt_blk_log_call(decoder, &insn, &iext);
+ if (status < 0)
+ return status;
+
+ /* Let's see if we can proceed to the next IP without trace.
+ *
+ * Note that we also stop due to displacement overflows or to
+ * maintain the return-address stack for near direct calls.
+ */
+ status = pt_insn_next_ip(&decoder->ip, &insn, &iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ /* We can't, so let's proceed with trace, which
+ * completes the block.
+ */
+ return pt_blk_proceed_with_trace(decoder, &insn, &iext);
+ }
+
+ /* End the block if the user asked us to.
+ *
+ * We only need to take care about direct near branches.
+ * Indirect and far branches require trace and will naturally
+ * end a block.
+ */
+ if ((decoder->flags.variant.block.end_on_call &&
+ (insn.iclass == ptic_call)) ||
+ (decoder->flags.variant.block.end_on_jump &&
+ (insn.iclass == ptic_jump)))
+ break;
+
+ /* If we can proceed without trace and we stay in @msec we may
+ * proceed further.
+ *
+ * We're done if we switch sections, though.
+ */
+ if (!pt_blk_is_in_section(msec, decoder->ip))
+ break;
+
+ return pt_blk_proceed_no_event_cached(decoder, block, bcache,
+ msec);
+ }
+
+ case ptbq_ind_call: {
+ uint64_t ip;
+
+ /* We're at a near indirect call. */
+ block->iclass = ptic_call;
+
+ /* We need to update the return-address stack and query the
+ * destination IP.
+ */
+ ip = decoder->ip;
+
+ /* If we already know the size of the instruction, we don't need
+ * to re-decode it.
+ */
+ if (bce.isize)
+ ip += bce.isize;
+ else {
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = pt_bce_exec_mode(bce);
+ insn.ip = ip;
+
+ status = pt_blk_decode_in_section(&insn, &iext, msec);
+ if (status < 0)
+ return status;
+
+ ip += insn.size;
+ }
+
+ status = pt_retstack_push(&decoder->retstack, ip);
+ if (status < 0)
+ return status;
+
+ status = pt_blk_indirect_branch(decoder, &decoder->ip);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+ break;
+ }
+
+ case ptbq_return: {
+ int taken;
+
+ /* We're at a near return. */
+ block->iclass = ptic_return;
+
+ /* Check for a compressed return. */
+ status = pt_blk_cond_branch(decoder, &taken);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ /* The return is not compressed. We need another query
+ * to determine the destination IP.
+ */
+ status = pt_blk_indirect_branch(decoder, &decoder->ip);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+ break;
+ }
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+
+ /* A compressed return is indicated by a taken conditional
+ * branch.
+ */
+ if (!taken)
+ return -pte_bad_retcomp;
+
+ return pt_retstack_pop(&decoder->retstack, &decoder->ip);
+ }
+
+ case ptbq_indirect:
+ /* We're at an indirect jump or far transfer.
+ *
+ * We don't know the exact instruction class and there's no
+ * reason to decode the instruction for any other purpose.
+ *
+ * Indicate that we don't know the instruction class and leave
+ * it to our caller to decode the instruction if needed.
+ */
+ block->iclass = ptic_error;
+
+ /* This is neither a near call nor return so we don't need to
+ * touch the return-address stack.
+ *
+ * Just query the destination IP.
+ */
+ status = pt_blk_indirect_branch(decoder, &decoder->ip);
+ if (status < 0)
+ return status;
+
+ /* Preserve the query decoder's response which indicates
+ * upcoming events.
+ */
+ decoder->status = status;
+ break;
+ }
+
+ return 0;
+}
+
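+/* A minimal sketch (illustration only): the fast-forward step that a valid
+ * cache entry enables, distilled from pt_blk_proceed_no_event_cached()
+ * above. We account for @bce->ninsn instructions in one step and land on
+ * the decision point at @*ip + @bce->displacement. Returns zero if that
+ * would overflow the block's instruction count.
+ */
+static inline int pt_blk_fast_forward_sketch(uint64_t *ip, uint16_t *ninsn,
+					     const struct pt_bcache_entry *bce)
+{
+	uint16_t sum;
+
+	sum = *ninsn + (uint16_t) bce->ninsn;
+	if (sum < *ninsn)
+		return 0;
+
+	*ninsn = sum;
+	*ip += bce->displacement;
+
+	return 1;
+}
+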
+static int pt_blk_msec_fill(struct pt_block_decoder *decoder,
+ const struct pt_mapped_section **pmsec)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_section *section;
+ int isid, errcode;
+
+ if (!decoder || !pmsec)
+ return -pte_internal;
+
+ isid = pt_msec_cache_fill(&decoder->scache, &msec, decoder->image,
+ &decoder->asid, decoder->ip);
+ if (isid < 0)
+ return isid;
+
+ section = pt_msec_section(msec);
+ if (!section)
+ return -pte_internal;
+
+ *pmsec = msec;
+
+ errcode = pt_section_request_bcache(section);
+ if (errcode < 0)
+ return errcode;
+
+ return isid;
+}
+
+static inline int pt_blk_msec_lookup(struct pt_block_decoder *decoder,
+ const struct pt_mapped_section **pmsec)
+{
+ int isid;
+
+ if (!decoder)
+ return -pte_internal;
+
+ isid = pt_msec_cache_read(&decoder->scache, pmsec, decoder->image,
+ decoder->ip);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ return pt_blk_msec_fill(decoder, pmsec);
+ }
+
+ return isid;
+}
+
+/* Proceed to the next decision point - try using the cache.
+ *
+ * Tracing is enabled and we don't have an event pending. Proceed as far as
+ * we get without trace. Stop when we either:
+ *
+ * - need trace in order to continue
+ * - overflow the max number of instructions in a block
+ *
+ * We actually proceed one instruction further to get the start IP for the next
+ * block. This only updates @decoder's internal state, though.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_proceed_no_event(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_block_cache *bcache;
+ struct pt_section *section;
+ int isid;
+
+ if (!decoder || !block)
+ return -pte_internal;
+
+ isid = pt_blk_msec_lookup(decoder, &msec);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ /* Even if there is no such section in the image, we may still
+ * read the memory via the callback function.
+ */
+ return pt_blk_proceed_no_event_uncached(decoder, block);
+ }
+
+ /* We do not switch sections inside a block. */
+ if (isid != block->isid) {
+ if (!pt_blk_block_is_empty(block))
+ return 0;
+
+ block->isid = isid;
+ }
+
+ section = pt_msec_section(msec);
+ if (!section)
+ return -pte_internal;
+
+ bcache = pt_section_bcache(section);
+ if (!bcache)
+ return pt_blk_proceed_no_event_uncached(decoder, block);
+
+ return pt_blk_proceed_no_event_cached(decoder, block, bcache, msec);
+}
+
+/* Proceed to the next event or decision point.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_blk_proceed(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ int status;
+
+ status = pt_blk_fetch_event(decoder);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ return pt_blk_proceed_event(decoder, block);
+ }
+
+ /* If tracing is disabled we should either be out of trace or we should
+ * have taken the event flow above.
+ */
+ if (!decoder->enabled) {
+ if (decoder->status & pts_eos)
+ return -pte_eos;
+
+ return -pte_no_enable;
+ }
+
+ status = pt_blk_proceed_no_event(decoder, block);
+ if (status < 0)
+ return status;
+
+ return pt_blk_proceed_trailing_event(decoder, block);
+}
+
+enum {
+ /* The maximum number of steps to take when determining whether the
+ * event location can be reached.
+ */
+ bdm64_max_steps = 0x100
+};
+
+/* Try to work around erratum BDM64.
+ *
+ * If we got a transaction abort immediately following a branch that produced
+ * trace, the trace for that branch might have been corrupted.
+ *
+ * Returns a positive integer if the erratum was handled.
+ * Returns zero if the erratum does not seem to apply.
+ * Returns a negative error code otherwise.
+ */
+static int pt_blk_handle_erratum_bdm64(struct pt_block_decoder *decoder,
+ const struct pt_block *block,
+ const struct pt_event *ev)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int status;
+
+ if (!decoder || !block || !ev)
+ return -pte_internal;
+
+ /* This only affects aborts. */
+ if (!ev->variant.tsx.aborted)
+ return 0;
+
+ /* This only affects branches that require trace.
+ *
+ * If the erratum hits, that branch ended the current block and brought
+ * us to the trailing event flow.
+ */
+ if (pt_blk_block_is_empty(block))
+ return 0;
+
+ insn.mode = block->mode;
+ insn.ip = block->end_ip;
+
+ status = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (status < 0)
+ return 0;
+
+ if (!pt_insn_is_branch(&insn, &iext))
+ return 0;
+
+ /* Let's check if we can reach the event location from here.
+ *
+ * If we can, let's assume the erratum did not hit. We might still be
+ * wrong but we're not able to tell.
+ */
+ status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip,
+ decoder->mode, decoder->image,
+ &decoder->asid, bdm64_max_steps);
+ if (status > 0)
+ return status;
+
+ /* We can't reach the event location. This could either mean that we
+ * stopped too early (and status is zero) or that the erratum hit.
+ *
+ * We assume the latter and pretend that the previous branch brought us
+ * to the event location, instead.
+ */
+ decoder->ip = ev->variant.tsx.ip;
+
+ return 1;
+}
+
+/* Check whether a trailing TSX event should be postponed.
+ *
+ * This involves handling erratum BDM64.
+ *
+ * Returns a positive integer if the event is to be postponed.
+ * Returns zero if the event should be processed.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_blk_postpone_trailing_tsx(struct pt_block_decoder *decoder,
+ struct pt_block *block,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ if (ev->ip_suppressed)
+ return 0;
+
+ if (block && decoder->query.config.errata.bdm64) {
+ status = pt_blk_handle_erratum_bdm64(decoder, block, ev);
+ if (status < 0)
+ return 1;
+ }
+
+ if (decoder->ip != ev->variant.tsx.ip)
+ return 1;
+
+ return 0;
+}
+
+/* Proceed with events that bind to the current decoder IP.
+ *
+ * This function is used in the following scenarios:
+ *
+ * - we just synchronized onto the trace stream
+ * - we ended a block and proceeded to the next IP
+ * - we processed an event that was indicated by this function
+ *
+ * Check if there is an event at the current IP that needs to be indicated to
+ * the user.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_blk_proceed_trailing_event(struct pt_block_decoder *decoder,
+ struct pt_block *block)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_blk_fetch_event(decoder);
+ if (status <= 0) {
+ if (status < 0)
+ return status;
+
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, 0);
+ }
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_disabled:
+ /* Synchronous disable events are normally indicated on the
+ * event flow.
+ */
+ if (!decoder->process_insn)
+ break;
+
+ /* A sync disable may bind to a CR3 changing instruction. */
+ if (ev->ip_suppressed &&
+ pt_insn_changes_cr3(&decoder->insn, &decoder->iext))
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Or it binds to the next branch that would require trace.
+ *
+ * Try to complete processing the current instruction by
+ * proceeding past it. If that fails because it would require
+ * trace, we can apply the disabled event.
+ */
+ status = pt_insn_next_ip(&decoder->ip, &decoder->insn,
+ &decoder->iext);
+ if (status < 0) {
+ if (status != -pte_bad_query)
+ return status;
+
+ status = pt_blk_set_disable_resume_ip(decoder,
+ &decoder->insn);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+ }
+
+ /* We proceeded past the current instruction. */
+ status = pt_blk_clear_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ /* This might have brought us to the disable IP. */
+ if (!ev->ip_suppressed &&
+ decoder->ip == ev->variant.disabled.ip)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ break;
+
+ case ptev_enabled:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_disabled:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (decoder->ip != ev->variant.async_disabled.at)
+ break;
+
+ if (decoder->query.config.errata.skd022) {
+ status = pt_blk_handle_erratum_skd022(decoder, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ /* If the erratum applies, the event is modified
+ * to a synchronous disable event that will be
+ * processed on the next pt_blk_proceed_event()
+ * call. We're done.
+ */
+ break;
+ }
+ }
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_branch:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (decoder->ip != ev->variant.async_branch.from)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_paging:
+ /* We apply the event immediately if we're not tracing. */
+ if (!decoder->enabled)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Synchronous paging events are normally indicated on the event
+ * flow, unless they bind to the same instruction as a previous
+ * event.
+ *
+ * We bind at most one paging event to an instruction, though.
+ */
+ if (!decoder->process_insn || decoder->bound_paging)
+ break;
+
+ /* We're done if we're not binding to the currently postponed
+ * instruction. We will process the event on the normal event
+ * flow in the next iteration.
+ */
+ if (!pt_insn_binds_to_pip(&decoder->insn, &decoder->iext))
+ break;
+
+ /* We bound a paging event. Make sure we do not bind further
+ * paging events to this instruction.
+ */
+ decoder->bound_paging = 1;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_paging:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_paging.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_vmcs:
+ /* We apply the event immediately if we're not tracing. */
+ if (!decoder->enabled)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Synchronous vmcs events are normally indicated on the event
+ * flow, unless they bind to the same instruction as a previous
+ * event.
+ *
+ * We bind at most one vmcs event to an instruction, though.
+ */
+ if (!decoder->process_insn || decoder->bound_vmcs)
+ break;
+
+ /* We're done if we're not binding to the currently postponed
+ * instruction. We will process the event on the normal event
+ * flow in the next iteration.
+ */
+ if (!pt_insn_binds_to_vmcs(&decoder->insn, &decoder->iext))
+ break;
+
+ /* We bound a vmcs event. Make sure we do not bind further vmcs
+ * events to this instruction.
+ */
+ decoder->bound_vmcs = 1;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_async_vmcs:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_vmcs.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_overflow:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_exec_mode:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.exec_mode.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_tsx:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ status = pt_blk_postpone_trailing_tsx(decoder, block, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ break;
+ }
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_stop:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_exstop:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_mwait:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ break;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_ptwrite:
+ /* We apply the event immediately if we're not tracing. */
+ if (!decoder->enabled)
+ return pt_blk_status(decoder, pts_event_pending);
+
+ /* Ptwrite events are normally indicated on the event flow,
+ * unless they bind to the same instruction as a previous event.
+ *
+ * We bind at most one ptwrite event to an instruction, though.
+ */
+ if (!decoder->process_insn || decoder->bound_ptwrite)
+ break;
+
+ /* We're done if we're not binding to the currently postponed
+ * instruction. We will process the event on the normal event
+ * flow in the next iteration.
+ */
+ if (!ev->ip_suppressed ||
+ !pt_insn_is_ptwrite(&decoder->insn, &decoder->iext))
+ break;
+
+ /* We bound a ptwrite event. Make sure we do not bind further
+ * ptwrite events to this instruction.
+ */
+ decoder->bound_ptwrite = 1;
+
+ return pt_blk_status(decoder, pts_event_pending);
+
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ /* This event does not bind to an instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, pts_event_pending);
+ }
+
+ /* No further events. Proceed past any postponed instruction. */
+ status = pt_blk_proceed_postponed_insn(decoder);
+ if (status < 0)
+ return status;
+
+ return pt_blk_status(decoder, 0);
+}
+
+int pt_blk_next(struct pt_block_decoder *decoder, struct pt_block *ublock,
+ size_t size)
+{
+ struct pt_block block, *pblock;
+ int errcode, status;
+
+ if (!decoder || !ublock)
+ return -pte_invalid;
+
+ pblock = size == sizeof(block) ? ublock : &block;
+
+ /* Zero-initialize the block in case of error returns. */
+ memset(pblock, 0, sizeof(*pblock));
+
+ /* Fill in a few things from the current decode state.
+ *
+ * This reflects the state of the last pt_blk_next() or pt_blk_start()
+	 * call. Note that, unless we stop with tracing disabled, we have
+	 * already proceeded to the start IP of the next block.
+ *
+ * Some of the state may later be overwritten as we process events.
+ */
+ pblock->ip = decoder->ip;
+ pblock->mode = decoder->mode;
+ if (decoder->speculative)
+ pblock->speculative = 1;
+
+ /* Proceed one block. */
+ status = pt_blk_proceed(decoder, pblock);
+
+ errcode = block_to_user(ublock, size, pblock);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
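+/* A minimal usage sketch (illustration only, assuming a decoder that was
+ * allocated and synchronized elsewhere): the typical loop drains all pending
+ * events after each block, as requested via pts_event_pending, and stops at
+ * the end of the trace.
+ */
+static int pt_blk_drain_sketch(struct pt_block_decoder *decoder)
+{
+	for (;;) {
+		struct pt_block block;
+		struct pt_event event;
+		int status;
+
+		status = pt_blk_next(decoder, &block, sizeof(block));
+		if (status < 0)
+			return (status == -pte_eos) ? 0 : status;
+
+		while (status & pts_event_pending) {
+			status = pt_blk_event(decoder, &event, sizeof(event));
+			if (status < 0)
+				return status;
+
+			/* ... handle @event ... */
+		}
+
+		/* ... handle @block ... */
+	}
+}
+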
+/* Process an enabled event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_enabled(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must have an IP in order to start decoding. */
+ if (ev->ip_suppressed)
+ return -pte_noip;
+
+ /* We must currently be disabled. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->ip = ev->variant.enabled.ip;
+ decoder->enabled = 1;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a disabled event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_disabled(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must currently be enabled. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ /* We preserve @decoder->ip. This is where we expect tracing to resume
+ * and we'll indicate that on the subsequent enabled event if tracing
+ * actually does resume from there.
+ */
+ decoder->enabled = 0;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process an asynchronous branch event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_async_branch(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must currently be enabled. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ /* Jump to the branch destination. We will continue from there in the
+ * next iteration.
+ */
+ decoder->ip = ev->variant.async_branch.to;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a paging event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_paging(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ uint64_t cr3;
+ int errcode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ cr3 = ev->variant.paging.cr3;
+ if (decoder->asid.cr3 != cr3) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.cr3 = cr3;
+ }
+
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a vmcs event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_vmcs(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ uint64_t vmcs;
+ int errcode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ vmcs = ev->variant.vmcs.base;
+ if (decoder->asid.vmcs != vmcs) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.vmcs = vmcs;
+ }
+
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process an overflow event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_overflow(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* If the IP is suppressed, the overflow resolved while tracing was
+ * disabled. Otherwise it resolved while tracing was enabled.
+ */
+ if (ev->ip_suppressed) {
+		/* Tracing is disabled. It doesn't make sense to preserve the
+		 * previous IP; it would only be misleading. Even if tracing
+		 * had already been disabled before, we might have missed the
+		 * re-enable in the overflow.
+ */
+ decoder->enabled = 0;
+ decoder->ip = 0ull;
+ } else {
+ /* Tracing is enabled and we're at the IP at which the overflow
+ * resolved.
+ */
+ decoder->enabled = 1;
+ decoder->ip = ev->variant.overflow.ip;
+ }
+
+ /* We don't know the TSX state. Let's assume we execute normally.
+ *
+ * We also don't know the execution mode. Let's keep what we have
+ * in case we don't get an update before we have to decode the next
+ * instruction.
+ */
+ decoder->speculative = 0;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process an exec mode event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_exec_mode(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ enum pt_exec_mode mode;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* Use status update events to diagnose inconsistencies. */
+ mode = ev->variant.exec_mode.mode;
+ if (ev->status_update && decoder->enabled &&
+ decoder->mode != ptem_unknown && decoder->mode != mode)
+ return -pte_bad_status_update;
+
+ decoder->mode = mode;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a tsx event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_tsx(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ decoder->speculative = ev->variant.tsx.speculative;
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+/* Process a stop event.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_blk_process_stop(struct pt_block_decoder *decoder,
+ const struct pt_event *ev)
+{
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* Tracing is always disabled before it is stopped. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->process_event = 0;
+
+ return 0;
+}
+
+int pt_blk_event(struct pt_block_decoder *decoder, struct pt_event *uevent,
+ size_t size)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder || !uevent)
+ return -pte_invalid;
+
+ /* We must currently process an event. */
+ if (!decoder->process_event)
+ return -pte_bad_query;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_enabled:
+ /* Indicate that tracing resumes from the IP at which tracing
+ * had been disabled before (with some special treatment for
+ * calls).
+ */
+ if (ev->variant.enabled.ip == decoder->ip)
+ ev->variant.enabled.resumed = 1;
+
+ status = pt_blk_process_enabled(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_disabled:
+ if (decoder->ip != ev->variant.async_disabled.at)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_disabled:
+ status = pt_blk_process_disabled(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_branch:
+ if (decoder->ip != ev->variant.async_branch.from)
+ return -pte_bad_query;
+
+ status = pt_blk_process_async_branch(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_paging:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_paging.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_paging:
+ status = pt_blk_process_paging(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_vmcs:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_vmcs.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_vmcs:
+ status = pt_blk_process_vmcs(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_overflow:
+ status = pt_blk_process_overflow(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exec_mode:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.exec_mode.ip)
+ return -pte_bad_query;
+
+ status = pt_blk_process_exec_mode(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_tsx:
+ if (!ev->ip_suppressed && decoder->ip != ev->variant.tsx.ip)
+ return -pte_bad_query;
+
+ status = pt_blk_process_tsx(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_stop:
+ status = pt_blk_process_stop(decoder, ev);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exstop:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ return -pte_bad_query;
+
+ decoder->process_event = 0;
+ break;
+
+ case ptev_mwait:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ return -pte_bad_query;
+
+ decoder->process_event = 0;
+ break;
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ case ptev_ptwrite:
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ decoder->process_event = 0;
+ break;
+ }
+
+ /* Copy the event to the user. Make sure we're not writing beyond the
+ * memory provided by the user.
+ *
+ * We might truncate details of an event but only for those events the
+ * user can't know about, anyway.
+ */
+ if (sizeof(*ev) < size)
+ size = sizeof(*ev);
+
+ memcpy(uevent, ev, size);
+
+ /* Indicate further events. */
+ return pt_blk_proceed_trailing_event(decoder, NULL);
+}
diff --git a/contrib/processor-trace/libipt/src/pt_config.c b/contrib/processor-trace/libipt/src/pt_config.c
new file mode 100644
index 0000000000000..1479daebd556b
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_config.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stddef.h>
+
+
+int pt_cpu_errata(struct pt_errata *errata, const struct pt_cpu *cpu)
+{
+ if (!errata || !cpu)
+ return -pte_invalid;
+
+ memset(errata, 0, sizeof(*errata));
+
+ /* We don't know about others. */
+ if (cpu->vendor != pcv_intel)
+ return -pte_bad_cpu;
+
+ switch (cpu->family) {
+ case 0x6:
+ switch (cpu->model) {
+ case 0x3d:
+ case 0x47:
+ case 0x4f:
+ case 0x56:
+ errata->bdm70 = 1;
+ errata->bdm64 = 1;
+ return 0;
+
+ case 0x4e:
+ case 0x5e:
+ errata->bdm70 = 1;
+ errata->skd007 = 1;
+ errata->skd022 = 1;
+ errata->skd010 = 1;
+ errata->skl014 = 1;
+ return 0;
+
+ case 0x8e:
+ case 0x9e:
+ errata->bdm70 = 1;
+ errata->skl014 = 1;
+ errata->skd022 = 1;
+ errata->skd010 = 1;
+ errata->skd007 = 1;
+ return 0;
+
+ case 0x5c:
+ case 0x5f:
+ errata->apl12 = 1;
+ errata->apl11 = 1;
+ return 0;
+ }
+ break;
+ }
+
+ return -pte_bad_cpu;
+}
+
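+/* A minimal usage sketch (illustration only, assuming pt_cpu.h is included
+ * for pt_cpu_read()): identify the host processor and derive the errata
+ * workarounds from it before configuring a decoder.
+ */
+static int pt_errata_sketch(struct pt_config *config)
+{
+	int errcode;
+
+	if (!config)
+		return -pte_invalid;
+
+	errcode = pt_cpu_read(&config->cpu);
+	if (errcode < 0)
+		return errcode;
+
+	return pt_cpu_errata(&config->errata, &config->cpu);
+}
+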
+int pt_config_from_user(struct pt_config *config,
+ const struct pt_config *uconfig)
+{
+ uint8_t *begin, *end;
+ size_t size;
+
+ if (!config)
+ return -pte_internal;
+
+ if (!uconfig)
+ return -pte_invalid;
+
+ size = uconfig->size;
+ if (size < offsetof(struct pt_config, decode))
+ return -pte_bad_config;
+
+ begin = uconfig->begin;
+ end = uconfig->end;
+
+ if (!begin || !end || end < begin)
+ return -pte_bad_config;
+
+ /* Ignore fields in the user's configuration we don't know; zero out
+ * fields the user didn't know about.
+ */
+ if (sizeof(*config) <= size)
+ size = sizeof(*config);
+ else
+ memset(((uint8_t *) config) + size, 0, sizeof(*config) - size);
+
+ /* Copy (portions of) the user's configuration. */
+ memcpy(config, uconfig, size);
+
+	/* We copied the user's size field - correct it to the number of bytes
+	 * we actually copied.
+	 */
+ config->size = size;
+
+ return 0;
+}
+
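+/* A minimal usage sketch (illustration only): how a user would prepare
+ * @config so the size-based compatibility copy above works as intended -
+ * zero everything, then state the size the caller was compiled against.
+ * @begin and @end delimit the raw trace buffer; @end points one byte past
+ * its last byte.
+ */
+static void pt_config_setup_sketch(struct pt_config *config,
+				   uint8_t *begin, uint8_t *end)
+{
+	memset(config, 0, sizeof(*config));
+	config->size = sizeof(*config);
+	config->begin = begin;
+	config->end = end;
+}
+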
+/* The maximum number of filter addresses that fit into the configuration. */
+static inline size_t pt_filter_addr_ncfg(void)
+{
+ return (sizeof(struct pt_conf_addr_filter) -
+ offsetof(struct pt_conf_addr_filter, addr0_a)) /
+ (2 * sizeof(uint64_t));
+}
+
+uint32_t pt_filter_addr_cfg(const struct pt_conf_addr_filter *filter, uint8_t n)
+{
+ if (!filter)
+ return 0u;
+
+ if (pt_filter_addr_ncfg() <= n)
+ return 0u;
+
+ return (filter->config.addr_cfg >> (4 * n)) & 0xf;
+}
+
+uint64_t pt_filter_addr_a(const struct pt_conf_addr_filter *filter, uint8_t n)
+{
+ const uint64_t *addr;
+
+ if (!filter)
+ return 0ull;
+
+ if (pt_filter_addr_ncfg() <= n)
+ return 0ull;
+
+ addr = &filter->addr0_a;
+ return addr[2 * n];
+}
+
+uint64_t pt_filter_addr_b(const struct pt_conf_addr_filter *filter, uint8_t n)
+{
+ const uint64_t *addr;
+
+ if (!filter)
+ return 0ull;
+
+ if (pt_filter_addr_ncfg() <= n)
+ return 0ull;
+
+ addr = &filter->addr0_a;
+ return addr[(2 * n) + 1];
+}
+
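+/* A minimal sketch (illustration only, assuming the field layout the
+ * accessors above read): program address range 0 as a FilterEn range. Each
+ * range owns a 4-bit field in addr_cfg; range 0 uses the lowest nibble.
+ */
+static void pt_filter_set_range0_sketch(struct pt_conf_addr_filter *filter,
+					uint64_t a, uint64_t b)
+{
+	/* Both bounds are inclusive, matching the checks below. */
+	filter->addr0_a = a;
+	filter->addr0_b = b;
+
+	filter->config.addr_cfg &= ~(uint64_t) 0xf;
+	filter->config.addr_cfg |= pt_addr_cfg_filter;
+}
+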
+static int pt_filter_check_cfg_filter(const struct pt_conf_addr_filter *filter,
+ uint64_t addr)
+{
+ uint8_t n;
+
+ if (!filter)
+ return -pte_internal;
+
+ for (n = 0; n < pt_filter_addr_ncfg(); ++n) {
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(filter, n);
+ if (addr_cfg != pt_addr_cfg_filter)
+ continue;
+
+ addr_a = pt_filter_addr_a(filter, n);
+ addr_b = pt_filter_addr_b(filter, n);
+
+ /* Note that both A and B are inclusive. */
+ if ((addr_a <= addr) && (addr <= addr_b))
+ return 1;
+ }
+
+ /* No filter hit. If we have at least one FilterEn filter, this means
+ * that tracing is disabled; otherwise, tracing is enabled.
+ */
+ for (n = 0; n < pt_filter_addr_ncfg(); ++n) {
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(filter, n);
+ if (addr_cfg == pt_addr_cfg_filter)
+ return 0;
+ }
+
+ return 1;
+}
+
+static int pt_filter_check_cfg_stop(const struct pt_conf_addr_filter *filter,
+ uint64_t addr)
+{
+ uint8_t n;
+
+ if (!filter)
+ return -pte_internal;
+
+ for (n = 0; n < pt_filter_addr_ncfg(); ++n) {
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(filter, n);
+ if (addr_cfg != pt_addr_cfg_stop)
+ continue;
+
+ addr_a = pt_filter_addr_a(filter, n);
+ addr_b = pt_filter_addr_b(filter, n);
+
+ /* Note that both A and B are inclusive. */
+ if ((addr_a <= addr) && (addr <= addr_b))
+ return 0;
+ }
+
+ return 1;
+}
+
+int pt_filter_addr_check(const struct pt_conf_addr_filter *filter,
+ uint64_t addr)
+{
+ int status;
+
+ status = pt_filter_check_cfg_stop(filter, addr);
+ if (status <= 0)
+ return status;
+
+ return pt_filter_check_cfg_filter(filter, addr);
+}
diff --git a/contrib/processor-trace/libipt/src/pt_cpu.c b/contrib/processor-trace/libipt/src/pt_cpu.c
new file mode 100644
index 0000000000000..c47e54d40cf6d
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_cpu.c
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpu.h"
+#include "pt_cpuid.h"
+
+#include "intel-pt.h"
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+
+
+static const char * const cpu_vendors[] = {
+ "",
+ "GenuineIntel"
+};
+
+enum {
+ pt_cpuid_vendor_size = 12
+};
+
+union cpu_vendor {
+ /* The raw data returned from cpuid. */
+ struct {
+ uint32_t ebx;
+ uint32_t edx;
+ uint32_t ecx;
+ } cpuid;
+
+ /* The resulting vendor string. */
+ char vendor_string[pt_cpuid_vendor_size];
+};
+
+static enum pt_cpu_vendor cpu_vendor(void)
+{
+ union cpu_vendor vendor;
+ uint32_t eax;
+ size_t i;
+
+ memset(&vendor, 0, sizeof(vendor));
+ eax = 0;
+
+ pt_cpuid(0u, &eax, &vendor.cpuid.ebx, &vendor.cpuid.ecx,
+ &vendor.cpuid.edx);
+
+ for (i = 0; i < sizeof(cpu_vendors)/sizeof(*cpu_vendors); i++)
+ if (strncmp(vendor.vendor_string,
+ cpu_vendors[i], pt_cpuid_vendor_size) == 0)
+ return (enum pt_cpu_vendor) i;
+
+ return pcv_unknown;
+}
+
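+/* Worked example (illustration only): cpuid leaf 0 returns the vendor string
+ * in the register order ebx, edx, ecx, which is why the union above declares
+ * its fields in exactly that order.
+ */
+static void pt_vendor_layout_sketch(void)
+{
+	union cpu_vendor vendor;
+
+	memcpy(&vendor.cpuid.ebx, "Genu", 4);
+	memcpy(&vendor.cpuid.edx, "ineI", 4);
+	memcpy(&vendor.cpuid.ecx, "ntel", 4);
+
+	/* vendor.vendor_string now matches "GenuineIntel" under strncmp()
+	 * with pt_cpuid_vendor_size; note that it is not zero-terminated.
+	 */
+	(void) vendor;
+}
+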
+static uint32_t cpu_info(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ eax = 0;
+ ebx = 0;
+ ecx = 0;
+ edx = 0;
+ pt_cpuid(1u, &eax, &ebx, &ecx, &edx);
+
+ return eax;
+}
+
+int pt_cpu_parse(struct pt_cpu *cpu, const char *s)
+{
+ const char sep = '/';
+ char *endptr;
+ long family, model, stepping;
+
+ if (!cpu || !s)
+ return -pte_invalid;
+
+ family = strtol(s, &endptr, 0);
+ if (s == endptr || *endptr == '\0' || *endptr != sep)
+ return -pte_invalid;
+
+ if (family < 0 || family > USHRT_MAX)
+ return -pte_invalid;
+
+ /* skip separator */
+ s = endptr + 1;
+
+ model = strtol(s, &endptr, 0);
+ if (s == endptr || (*endptr != '\0' && *endptr != sep))
+ return -pte_invalid;
+
+ if (model < 0 || model > UCHAR_MAX)
+ return -pte_invalid;
+
+ if (*endptr == '\0')
+		/* stepping was omitted; it defaults to 0 */
+ stepping = 0;
+ else {
+ /* skip separator */
+ s = endptr + 1;
+
+ stepping = strtol(s, &endptr, 0);
+ if (*endptr != '\0')
+ return -pte_invalid;
+
+ if (stepping < 0 || stepping > UCHAR_MAX)
+ return -pte_invalid;
+ }
+
+ cpu->vendor = pcv_intel;
+ cpu->family = (uint16_t) family;
+ cpu->model = (uint8_t) model;
+ cpu->stepping = (uint8_t) stepping;
+
+ return 0;
+}
+
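+/* A minimal usage sketch (illustration only): parse a family/model[/stepping]
+ * string, e.g. from a command-line option. strtol() with base 0 also accepts
+ * hexadecimal input such as "6/0x5e".
+ */
+static int pt_cpu_parse_sketch(struct pt_cpu *cpu)
+{
+	/* Decimal 94 is model 0x5e; the stepping may also be omitted. */
+	return pt_cpu_parse(cpu, "6/94/3");
+}
+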
+int pt_cpu_read(struct pt_cpu *cpu)
+{
+ uint32_t info;
+ uint16_t family;
+
+ if (!cpu)
+ return -pte_invalid;
+
+ cpu->vendor = cpu_vendor();
+
+ info = cpu_info();
+
+ cpu->family = family = (info>>8) & 0xf;
+ if (family == 0xf)
+ cpu->family += (info>>20) & 0xf;
+
+ cpu->model = (info>>4) & 0xf;
+ if (family == 0x6 || family == 0xf)
+ cpu->model += (info>>12) & 0xf0;
+
+ cpu->stepping = (info>>0) & 0xf;
+
+ return 0;
+}
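
The family and model composition above follows CPUID's display-family/display-model convention: the extended family is added only when the base family is 0xf, and the extended model is prepended only for families 0x6 and 0xf. A worked example for a hypothetical leaf-1 EAX value of 0x000506e3:

	/* info = 0x000506e3
	 *
	 *   family   = (info >> 8) & 0xf    = 0x6 (not 0xf; the extended
	 *                                     family bits are not added)
	 *   model    = (info >> 4) & 0xf    = 0x0e
	 *            + (info >> 12) & 0xf0  = 0x50  -> 0x5e
	 *   stepping = info & 0xf           = 0x3
	 *
	 * i.e. family 6, model 0x5e, stepping 3.
	 */
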
diff --git a/contrib/processor-trace/libipt/src/pt_decoder_function.c b/contrib/processor-trace/libipt/src/pt_decoder_function.c
new file mode 100644
index 0000000000000..4c7d48e1c68cf
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_decoder_function.c
@@ -0,0 +1,379 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_decoder_function.h"
+#include "pt_packet_decoder.h"
+#include "pt_query_decoder.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+const struct pt_decoder_function pt_decode_unknown = {
+ /* .packet = */ pt_pkt_decode_unknown,
+ /* .decode = */ pt_qry_decode_unknown,
+ /* .header = */ pt_qry_decode_unknown,
+ /* .flags = */ pdff_unknown
+};
+
+const struct pt_decoder_function pt_decode_pad = {
+ /* .packet = */ pt_pkt_decode_pad,
+ /* .decode = */ pt_qry_decode_pad,
+ /* .header = */ pt_qry_decode_pad,
+ /* .flags = */ pdff_pad
+};
+
+const struct pt_decoder_function pt_decode_psb = {
+ /* .packet = */ pt_pkt_decode_psb,
+ /* .decode = */ pt_qry_decode_psb,
+ /* .header = */ NULL,
+ /* .flags = */ 0
+};
+
+const struct pt_decoder_function pt_decode_tip = {
+ /* .packet = */ pt_pkt_decode_tip,
+ /* .decode = */ pt_qry_decode_tip,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_tip
+};
+
+const struct pt_decoder_function pt_decode_tnt_8 = {
+ /* .packet = */ pt_pkt_decode_tnt_8,
+ /* .decode = */ pt_qry_decode_tnt_8,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_tnt
+};
+
+const struct pt_decoder_function pt_decode_tnt_64 = {
+ /* .packet = */ pt_pkt_decode_tnt_64,
+ /* .decode = */ pt_qry_decode_tnt_64,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_tnt
+};
+
+const struct pt_decoder_function pt_decode_tip_pge = {
+ /* .packet = */ pt_pkt_decode_tip_pge,
+ /* .decode = */ pt_qry_decode_tip_pge,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_tip_pgd = {
+ /* .packet = */ pt_pkt_decode_tip_pgd,
+ /* .decode = */ pt_qry_decode_tip_pgd,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_fup = {
+ /* .packet = */ pt_pkt_decode_fup,
+ /* .decode = */ pt_qry_decode_fup,
+ /* .header = */ pt_qry_header_fup,
+ /* .flags = */ pdff_fup
+};
+
+const struct pt_decoder_function pt_decode_pip = {
+ /* .packet = */ pt_pkt_decode_pip,
+ /* .decode = */ pt_qry_decode_pip,
+ /* .header = */ pt_qry_header_pip,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_ovf = {
+ /* .packet = */ pt_pkt_decode_ovf,
+ /* .decode = */ pt_qry_decode_ovf,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_psbend | pdff_event
+};
+
+const struct pt_decoder_function pt_decode_mode = {
+ /* .packet = */ pt_pkt_decode_mode,
+ /* .decode = */ pt_qry_decode_mode,
+ /* .header = */ pt_qry_header_mode,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_psbend = {
+ /* .packet = */ pt_pkt_decode_psbend,
+ /* .decode = */ pt_qry_decode_psbend,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_psbend
+};
+
+const struct pt_decoder_function pt_decode_tsc = {
+ /* .packet = */ pt_pkt_decode_tsc,
+ /* .decode = */ pt_qry_decode_tsc,
+ /* .header = */ pt_qry_header_tsc,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_cbr = {
+ /* .packet = */ pt_pkt_decode_cbr,
+ /* .decode = */ pt_qry_decode_cbr,
+ /* .header = */ pt_qry_header_cbr,
+ /* .flags = */ pdff_timing | pdff_event
+};
+
+const struct pt_decoder_function pt_decode_tma = {
+ /* .packet = */ pt_pkt_decode_tma,
+ /* .decode = */ pt_qry_decode_tma,
+ /* .header = */ pt_qry_decode_tma,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_mtc = {
+ /* .packet = */ pt_pkt_decode_mtc,
+ /* .decode = */ pt_qry_decode_mtc,
+ /* .header = */ pt_qry_decode_mtc,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_cyc = {
+ /* .packet = */ pt_pkt_decode_cyc,
+ /* .decode = */ pt_qry_decode_cyc,
+ /* .header = */ pt_qry_decode_cyc,
+ /* .flags = */ pdff_timing
+};
+
+const struct pt_decoder_function pt_decode_stop = {
+ /* .packet = */ pt_pkt_decode_stop,
+ /* .decode = */ pt_qry_decode_stop,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_vmcs = {
+ /* .packet = */ pt_pkt_decode_vmcs,
+ /* .decode = */ pt_qry_decode_vmcs,
+ /* .header = */ pt_qry_header_vmcs,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_mnt = {
+ /* .packet = */ pt_pkt_decode_mnt,
+ /* .decode = */ pt_qry_decode_mnt,
+ /* .header = */ pt_qry_header_mnt,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_exstop = {
+ /* .packet = */ pt_pkt_decode_exstop,
+ /* .decode = */ pt_qry_decode_exstop,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_mwait = {
+ /* .packet = */ pt_pkt_decode_mwait,
+ /* .decode = */ pt_qry_decode_mwait,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_pwre = {
+ /* .packet = */ pt_pkt_decode_pwre,
+ /* .decode = */ pt_qry_decode_pwre,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_pwrx = {
+ /* .packet = */ pt_pkt_decode_pwrx,
+ /* .decode = */ pt_qry_decode_pwrx,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+const struct pt_decoder_function pt_decode_ptw = {
+ /* .packet = */ pt_pkt_decode_ptw,
+ /* .decode = */ pt_qry_decode_ptw,
+ /* .header = */ NULL,
+ /* .flags = */ pdff_event
+};
+
+
+int pt_df_fetch(const struct pt_decoder_function **dfun, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ uint8_t opc, ext, ext2;
+
+ if (!dfun || !config)
+ return -pte_internal;
+
+ /* Clear the decode function in case of errors. */
+ *dfun = NULL;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pos || (pos < begin) || (end < pos))
+ return -pte_nosync;
+
+ if (pos == end)
+ return -pte_eos;
+
+ opc = *pos++;
+ switch (opc) {
+ default:
+ /* Check opcodes that require masking. */
+ if ((opc & pt_opm_tnt_8) == pt_opc_tnt_8) {
+ *dfun = &pt_decode_tnt_8;
+ return 0;
+ }
+
+ if ((opc & pt_opm_cyc) == pt_opc_cyc) {
+ *dfun = &pt_decode_cyc;
+ return 0;
+ }
+
+ if ((opc & pt_opm_tip) == pt_opc_tip) {
+ *dfun = &pt_decode_tip;
+ return 0;
+ }
+
+ if ((opc & pt_opm_fup) == pt_opc_fup) {
+ *dfun = &pt_decode_fup;
+ return 0;
+ }
+
+ if ((opc & pt_opm_tip) == pt_opc_tip_pge) {
+ *dfun = &pt_decode_tip_pge;
+ return 0;
+ }
+
+ if ((opc & pt_opm_tip) == pt_opc_tip_pgd) {
+ *dfun = &pt_decode_tip_pgd;
+ return 0;
+ }
+
+ *dfun = &pt_decode_unknown;
+ return 0;
+
+ case pt_opc_pad:
+ *dfun = &pt_decode_pad;
+ return 0;
+
+ case pt_opc_mode:
+ *dfun = &pt_decode_mode;
+ return 0;
+
+ case pt_opc_tsc:
+ *dfun = &pt_decode_tsc;
+ return 0;
+
+ case pt_opc_mtc:
+ *dfun = &pt_decode_mtc;
+ return 0;
+
+ case pt_opc_ext:
+ if (pos == end)
+ return -pte_eos;
+
+ ext = *pos++;
+ switch (ext) {
+ default:
+ /* Check opcodes that require masking. */
+ if ((ext & pt_opm_ptw) == pt_ext_ptw) {
+ *dfun = &pt_decode_ptw;
+ return 0;
+ }
+
+ *dfun = &pt_decode_unknown;
+ return 0;
+
+ case pt_ext_psb:
+ *dfun = &pt_decode_psb;
+ return 0;
+
+ case pt_ext_ovf:
+ *dfun = &pt_decode_ovf;
+ return 0;
+
+ case pt_ext_tnt_64:
+ *dfun = &pt_decode_tnt_64;
+ return 0;
+
+ case pt_ext_psbend:
+ *dfun = &pt_decode_psbend;
+ return 0;
+
+ case pt_ext_cbr:
+ *dfun = &pt_decode_cbr;
+ return 0;
+
+ case pt_ext_pip:
+ *dfun = &pt_decode_pip;
+ return 0;
+
+ case pt_ext_tma:
+ *dfun = &pt_decode_tma;
+ return 0;
+
+ case pt_ext_stop:
+ *dfun = &pt_decode_stop;
+ return 0;
+
+ case pt_ext_vmcs:
+ *dfun = &pt_decode_vmcs;
+ return 0;
+
+ case pt_ext_exstop:
+ case pt_ext_exstop_ip:
+ *dfun = &pt_decode_exstop;
+ return 0;
+
+ case pt_ext_mwait:
+ *dfun = &pt_decode_mwait;
+ return 0;
+
+ case pt_ext_pwre:
+ *dfun = &pt_decode_pwre;
+ return 0;
+
+ case pt_ext_pwrx:
+ *dfun = &pt_decode_pwrx;
+ return 0;
+
+ case pt_ext_ext2:
+ if (pos == end)
+ return -pte_eos;
+
+ ext2 = *pos++;
+ switch (ext2) {
+ default:
+ *dfun = &pt_decode_unknown;
+ return 0;
+
+ case pt_ext2_mnt:
+ *dfun = &pt_decode_mnt;
+ return 0;
+ }
+ }
+ }
+}
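
pt_df_fetch() inspects only the opcode byte and up to two extension bytes; it neither consumes the packet nor validates its payload. A dispatch sketch, assuming only what the headers above declare (the fetch_and_decode() wrapper itself is hypothetical):

	#include "pt_decoder_function.h"
	#include "intel-pt.h"

	/* Look up the decode function for the packet at @pos and
	 * invoke its packet-level decoder.
	 */
	static int fetch_and_decode(struct pt_packet_decoder *decoder,
				    struct pt_packet *packet,
				    const uint8_t *pos,
				    const struct pt_config *config)
	{
		const struct pt_decoder_function *dfun;
		int errcode;

		errcode = pt_df_fetch(&dfun, pos, config);
		if (errcode < 0)
			return errcode;

		if (!dfun || !dfun->packet)
			return -pte_internal;

		return dfun->packet(decoder, packet);
	}
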
diff --git a/contrib/processor-trace/libipt/src/pt_encoder.c b/contrib/processor-trace/libipt/src/pt_encoder.c
new file mode 100644
index 0000000000000..946b88cbff7ad
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_encoder.c
@@ -0,0 +1,917 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_encoder.h"
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+int pt_encoder_init(struct pt_encoder *encoder, const struct pt_config *config)
+{
+ int errcode;
+
+ if (!encoder)
+ return -pte_invalid;
+
+ memset(encoder, 0, sizeof(*encoder));
+
+ errcode = pt_config_from_user(&encoder->config, config);
+ if (errcode < 0)
+ return errcode;
+
+ encoder->pos = encoder->config.begin;
+
+ return 0;
+}
+
+void pt_encoder_fini(struct pt_encoder *encoder)
+{
+ (void) encoder;
+
+ /* Nothing to do. */
+}
+
+struct pt_encoder *pt_alloc_encoder(const struct pt_config *config)
+{
+ struct pt_encoder *encoder;
+ int errcode;
+
+ encoder = malloc(sizeof(*encoder));
+ if (!encoder)
+ return NULL;
+
+ errcode = pt_encoder_init(encoder, config);
+ if (errcode < 0) {
+ free(encoder);
+ return NULL;
+ }
+
+ return encoder;
+}
+
+void pt_free_encoder(struct pt_encoder *encoder)
+{
+ pt_encoder_fini(encoder);
+ free(encoder);
+}
+
+int pt_enc_sync_set(struct pt_encoder *encoder, uint64_t offset)
+{
+ uint8_t *begin, *end, *pos;
+
+ if (!encoder)
+ return -pte_invalid;
+
+ begin = encoder->config.begin;
+ end = encoder->config.end;
+ pos = begin + offset;
+
+ if (end < pos || pos < begin)
+ return -pte_eos;
+
+ encoder->pos = pos;
+ return 0;
+}
+
+int pt_enc_get_offset(const struct pt_encoder *encoder, uint64_t *offset)
+{
+ const uint8_t *raw, *begin;
+
+ if (!encoder || !offset)
+ return -pte_invalid;
+
+ /* The encoder is synchronized at all times. */
+ raw = encoder->pos;
+ if (!raw)
+ return -pte_internal;
+
+ begin = encoder->config.begin;
+ if (!begin)
+ return -pte_internal;
+
+ *offset = raw - begin;
+ return 0;
+}
+
+const struct pt_config *pt_enc_get_config(const struct pt_encoder *encoder)
+{
+ if (!encoder)
+ return NULL;
+
+ return &encoder->config;
+}
+
+/* Check the remaining space.
+ *
+ * Returns zero if there are at least \@size bytes of free space available in
+ * \@encoder's Intel PT buffer.
+ *
+ * Returns -pte_eos if not enough space is available.
+ * Returns -pte_internal if \@encoder is NULL.
+ * Returns -pte_internal if \@encoder is not synchronized.
+ */
+static int pt_reserve(const struct pt_encoder *encoder, unsigned int size)
+{
+ const uint8_t *begin, *end, *pos;
+
+ if (!encoder)
+ return -pte_internal;
+
+ /* The encoder is synchronized at all times. */
+ pos = encoder->pos;
+ if (!pos)
+ return -pte_internal;
+
+ begin = encoder->config.begin;
+ end = encoder->config.end;
+
+ pos += size;
+ if (pos < begin || end < pos)
+ return -pte_eos;
+
+ return 0;
+}
+
+/* Return the size of an IP payload based on its IP compression.
+ *
+ * Returns -pte_invalid if \@ipc is not a valid IP compression.
+ */
+static int pt_ipc_size(enum pt_ip_compression ipc)
+{
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ return 0;
+
+ case pt_ipc_update_16:
+ return pt_pl_ip_upd16_size;
+
+ case pt_ipc_update_32:
+ return pt_pl_ip_upd32_size;
+
+ case pt_ipc_update_48:
+ return pt_pl_ip_upd48_size;
+
+ case pt_ipc_sext_48:
+ return pt_pl_ip_sext48_size;
+
+ case pt_ipc_full:
+ return pt_pl_ip_full_size;
+ }
+
+ return -pte_invalid;
+}
+
+/* Encode an integer value.
+ *
+ * Writes the \@size least significant bytes of \@value starting from \@pos.
+ *
+ * The caller needs to ensure that there is enough space available.
+ *
+ * Returns the updated position.
+ */
+static uint8_t *pt_encode_int(uint8_t *pos, uint64_t val, int size)
+{
+ for (; size; --size, val >>= 8)
+ *pos++ = (uint8_t) val;
+
+ return pos;
+}
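
The bytes are emitted least significant first, independent of host endianness. For illustration (hypothetical buffer):

	/* uint8_t buf[2];
	 *
	 * pt_encode_int(buf, 0x0102ull, 2);
	 * -> buf[0] == 0x02, buf[1] == 0x01
	 */
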
+
+/* Encode an IP packet.
+ *
+ * Write an IP packet with opcode \@opc and payload from \@packet if there is
+ * enough space in \@encoder's Intel PT buffer.
+ *
+ * Returns the number of bytes written on success.
+ *
+ * Returns -pte_eos if there is not enough space.
+ * Returns -pte_internal if \@encoder or \@packet is NULL.
+ * Returns -pte_invalid if \@packet.ipc is not valid.
+ */
+static int pt_encode_ip(struct pt_encoder *encoder, enum pt_opcode op,
+ const struct pt_packet_ip *packet)
+{
+ uint8_t *pos;
+ uint8_t opc, ipc;
+ int size, errcode;
+
+ if (!encoder || !packet)
+		return -pte_internal;
+
+ size = pt_ipc_size(packet->ipc);
+ if (size < 0)
+ return size;
+
+ errcode = pt_reserve(encoder, /* opc size = */ 1 + size);
+ if (errcode < 0)
+ return errcode;
+
+ /* We already checked the ipc in pt_ipc_size(). */
+ ipc = (uint8_t) (packet->ipc << pt_opm_ipc_shr);
+ opc = (uint8_t) op;
+
+ pos = encoder->pos;
+ *pos++ = opc | ipc;
+
+ encoder->pos = pt_encode_int(pos, packet->ip, size);
+ return /* opc size = */ 1 + size;
+}
+
+int pt_enc_next(struct pt_encoder *encoder, const struct pt_packet *packet)
+{
+ uint8_t *pos, *begin;
+ int errcode;
+
+ if (!encoder || !packet)
+ return -pte_invalid;
+
+ pos = begin = encoder->pos;
+ switch (packet->type) {
+ case ppt_pad:
+ errcode = pt_reserve(encoder, ptps_pad);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_pad;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_psb: {
+ uint64_t psb;
+
+ errcode = pt_reserve(encoder, ptps_psb);
+ if (errcode < 0)
+ return errcode;
+
+ psb = ((uint64_t) pt_psb_hilo << 48 |
+ (uint64_t) pt_psb_hilo << 32 |
+ (uint64_t) pt_psb_hilo << 16 |
+ (uint64_t) pt_psb_hilo);
+
+ pos = pt_encode_int(pos, psb, 8);
+ pos = pt_encode_int(pos, psb, 8);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_psbend:
+ errcode = pt_reserve(encoder, ptps_psbend);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_psbend;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_ovf:
+ errcode = pt_reserve(encoder, ptps_ovf);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_ovf;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_fup:
+ return pt_encode_ip(encoder, pt_opc_fup, &packet->payload.ip);
+
+ case ppt_tip:
+ return pt_encode_ip(encoder, pt_opc_tip, &packet->payload.ip);
+
+ case ppt_tip_pge:
+ return pt_encode_ip(encoder, pt_opc_tip_pge,
+ &packet->payload.ip);
+
+ case ppt_tip_pgd:
+ return pt_encode_ip(encoder, pt_opc_tip_pgd,
+ &packet->payload.ip);
+
+ case ppt_tnt_8: {
+ uint8_t opc, stop;
+
+ if (packet->payload.tnt.bit_size >= 7)
+ return -pte_bad_packet;
+
+ errcode = pt_reserve(encoder, ptps_tnt_8);
+ if (errcode < 0)
+ return errcode;
+
+ stop = packet->payload.tnt.bit_size + pt_opm_tnt_8_shr;
+ opc = (uint8_t)
+ (packet->payload.tnt.payload << pt_opm_tnt_8_shr);
+
+ *pos++ = (uint8_t) (opc | (1u << stop));
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_tnt_64: {
+ uint64_t tnt, stop;
+
+ errcode = pt_reserve(encoder, ptps_tnt_64);
+ if (errcode < 0)
+ return errcode;
+
+ if (packet->payload.tnt.bit_size >= pt_pl_tnt_64_bits)
+ return -pte_invalid;
+
+ stop = 1ull << packet->payload.tnt.bit_size;
+ tnt = packet->payload.tnt.payload;
+
+ if (tnt & ~(stop - 1))
+ return -pte_invalid;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_tnt_64;
+ pos = pt_encode_int(pos, tnt | stop, pt_pl_tnt_64_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_mode: {
+ uint8_t mode;
+
+ errcode = pt_reserve(encoder, ptps_mode);
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet->payload.mode.leaf) {
+ default:
+ return -pte_bad_packet;
+
+ case pt_mol_exec:
+ mode = pt_mol_exec;
+
+ if (packet->payload.mode.bits.exec.csl)
+ mode |= pt_mob_exec_csl;
+
+ if (packet->payload.mode.bits.exec.csd)
+ mode |= pt_mob_exec_csd;
+ break;
+
+ case pt_mol_tsx:
+ mode = pt_mol_tsx;
+
+ if (packet->payload.mode.bits.tsx.intx)
+ mode |= pt_mob_tsx_intx;
+
+ if (packet->payload.mode.bits.tsx.abrt)
+ mode |= pt_mob_tsx_abrt;
+ break;
+ }
+
+ *pos++ = pt_opc_mode;
+ *pos++ = mode;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_pip: {
+ uint64_t cr3;
+
+ errcode = pt_reserve(encoder, ptps_pip);
+ if (errcode < 0)
+ return errcode;
+
+ cr3 = packet->payload.pip.cr3;
+ cr3 >>= pt_pl_pip_shl;
+ cr3 <<= pt_pl_pip_shr;
+
+ if (packet->payload.pip.nr)
+ cr3 |= (uint64_t) pt_pl_pip_nr;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_pip;
+ pos = pt_encode_int(pos, cr3, pt_pl_pip_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_tsc:
+ errcode = pt_reserve(encoder, ptps_tsc);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_tsc;
+ pos = pt_encode_int(pos, packet->payload.tsc.tsc,
+ pt_pl_tsc_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_cbr:
+ errcode = pt_reserve(encoder, ptps_cbr);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_cbr;
+ *pos++ = packet->payload.cbr.ratio;
+ *pos++ = 0;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_tma: {
+ uint16_t ctc, fc;
+
+ errcode = pt_reserve(encoder, ptps_tma);
+ if (errcode < 0)
+ return errcode;
+
+ ctc = packet->payload.tma.ctc;
+ fc = packet->payload.tma.fc;
+
+ if (fc & ~pt_pl_tma_fc_mask)
+ return -pte_bad_packet;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_tma;
+ pos = pt_encode_int(pos, ctc, pt_pl_tma_ctc_size);
+ *pos++ = 0;
+ pos = pt_encode_int(pos, fc, pt_pl_tma_fc_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_mtc:
+ errcode = pt_reserve(encoder, ptps_mtc);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_mtc;
+ *pos++ = packet->payload.mtc.ctc;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_cyc: {
+ uint8_t byte[pt_pl_cyc_max_size], index, end;
+ uint64_t ctc;
+
+ ctc = (uint8_t) packet->payload.cyc.value;
+ ctc <<= pt_opm_cyc_shr;
+
+ byte[0] = pt_opc_cyc;
+ byte[0] |= (uint8_t) ctc;
+
+ ctc = packet->payload.cyc.value;
+ ctc >>= (8 - pt_opm_cyc_shr);
+ if (ctc)
+ byte[0] |= pt_opm_cyc_ext;
+
+ for (end = 1; ctc; ++end) {
+ /* Check if the CYC payload is too big. */
+ if (pt_pl_cyc_max_size <= end)
+ return -pte_bad_packet;
+
+ ctc <<= pt_opm_cycx_shr;
+
+ byte[end] = (uint8_t) ctc;
+
+ ctc >>= 8;
+ if (ctc)
+ byte[end] |= pt_opm_cycx_ext;
+ }
+
+ errcode = pt_reserve(encoder, end);
+ if (errcode < 0)
+ return errcode;
+
+ for (index = 0; index < end; ++index)
+ *pos++ = byte[index];
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_stop:
+ errcode = pt_reserve(encoder, ptps_stop);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_stop;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_vmcs:
+ errcode = pt_reserve(encoder, ptps_vmcs);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_vmcs;
+ pos = pt_encode_int(pos,
+ packet->payload.vmcs.base >> pt_pl_vmcs_shl,
+ pt_pl_vmcs_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_mnt:
+ errcode = pt_reserve(encoder, ptps_mnt);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_ext2;
+ *pos++ = pt_ext2_mnt;
+ pos = pt_encode_int(pos, packet->payload.mnt.payload,
+ pt_pl_mnt_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_exstop: {
+ uint8_t ext;
+
+ errcode = pt_reserve(encoder, ptps_exstop);
+ if (errcode < 0)
+ return errcode;
+
+ ext = packet->payload.exstop.ip ?
+ pt_ext_exstop_ip : pt_ext_exstop;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = ext;
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_mwait:
+ errcode = pt_reserve(encoder, ptps_mwait);
+ if (errcode < 0)
+ return errcode;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_mwait;
+ pos = pt_encode_int(pos, packet->payload.mwait.hints,
+ pt_pl_mwait_hints_size);
+ pos = pt_encode_int(pos, packet->payload.mwait.ext,
+ pt_pl_mwait_ext_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+
+ case ppt_pwre: {
+ uint64_t payload;
+
+ errcode = pt_reserve(encoder, ptps_pwre);
+ if (errcode < 0)
+ return errcode;
+
+ payload = 0ull;
+ payload |= ((uint64_t) packet->payload.pwre.state <<
+ pt_pl_pwre_state_shr) &
+ (uint64_t) pt_pl_pwre_state_mask;
+ payload |= ((uint64_t) packet->payload.pwre.sub_state <<
+ pt_pl_pwre_sub_state_shr) &
+ (uint64_t) pt_pl_pwre_sub_state_mask;
+
+ if (packet->payload.pwre.hw)
+ payload |= (uint64_t) pt_pl_pwre_hw_mask;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_pwre;
+ pos = pt_encode_int(pos, payload, pt_pl_pwre_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_pwrx: {
+ uint64_t payload;
+
+ errcode = pt_reserve(encoder, ptps_pwrx);
+ if (errcode < 0)
+ return errcode;
+
+ payload = 0ull;
+ payload |= ((uint64_t) packet->payload.pwrx.last <<
+ pt_pl_pwrx_last_shr) &
+ (uint64_t) pt_pl_pwrx_last_mask;
+ payload |= ((uint64_t) packet->payload.pwrx.deepest <<
+ pt_pl_pwrx_deepest_shr) &
+ (uint64_t) pt_pl_pwrx_deepest_mask;
+
+ if (packet->payload.pwrx.interrupt)
+ payload |= (uint64_t) pt_pl_pwrx_wr_int;
+ if (packet->payload.pwrx.store)
+ payload |= (uint64_t) pt_pl_pwrx_wr_store;
+ if (packet->payload.pwrx.autonomous)
+ payload |= (uint64_t) pt_pl_pwrx_wr_hw;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = pt_ext_pwrx;
+ pos = pt_encode_int(pos, payload, pt_pl_pwrx_size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_ptw: {
+ uint8_t plc, ext;
+ int size;
+
+ plc = packet->payload.ptw.plc;
+
+ size = pt_ptw_size(plc);
+ if (size < 0)
+ return size;
+
+ errcode = pt_reserve(encoder, pt_opcs_ptw + size);
+ if (errcode < 0)
+ return errcode;
+
+ ext = pt_ext_ptw;
+ ext |= plc << pt_opm_ptw_pb_shr;
+
+ if (packet->payload.ptw.ip)
+ ext |= (uint8_t) pt_opm_ptw_ip;
+
+ *pos++ = pt_opc_ext;
+ *pos++ = ext;
+ pos = pt_encode_int(pos, packet->payload.ptw.payload, size);
+
+ encoder->pos = pos;
+ return (int) (pos - begin);
+ }
+
+ case ppt_unknown:
+ case ppt_invalid:
+ return -pte_bad_opc;
+ }
+
+ return -pte_bad_opc;
+}
+
+int pt_encode_pad(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_pad;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_psb(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_psb;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_psbend(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_psbend;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tip(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tip;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tnt_8(struct pt_encoder *encoder, uint8_t tnt, int size)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tnt_8;
+ packet.payload.tnt.bit_size = (uint8_t) size;
+ packet.payload.tnt.payload = tnt;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tnt_64(struct pt_encoder *encoder, uint64_t tnt, int size)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tnt_64;
+ packet.payload.tnt.bit_size = (uint8_t) size;
+ packet.payload.tnt.payload = tnt;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tip_pge(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tip_pge;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tip_pgd(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tip_pgd;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_fup(struct pt_encoder *encoder, uint64_t ip,
+ enum pt_ip_compression ipc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_fup;
+ packet.payload.ip.ip = ip;
+ packet.payload.ip.ipc = ipc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_pip(struct pt_encoder *encoder, uint64_t cr3, uint8_t flags)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_pip;
+ packet.payload.pip.cr3 = cr3;
+ packet.payload.pip.nr = (flags & pt_pl_pip_nr) != 0;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_ovf(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_ovf;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mode_exec(struct pt_encoder *encoder, enum pt_exec_mode mode)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_exec;
+ packet.payload.mode.bits.exec = pt_set_exec_mode(mode);
+
+ return pt_enc_next(encoder, &packet);
+}
+
+
+int pt_encode_mode_tsx(struct pt_encoder *encoder, uint8_t bits)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_tsx;
+
+ if (bits & pt_mob_tsx_intx)
+ packet.payload.mode.bits.tsx.intx = 1;
+ else
+ packet.payload.mode.bits.tsx.intx = 0;
+
+ if (bits & pt_mob_tsx_abrt)
+ packet.payload.mode.bits.tsx.abrt = 1;
+ else
+ packet.payload.mode.bits.tsx.abrt = 0;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tsc(struct pt_encoder *encoder, uint64_t tsc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tsc;
+ packet.payload.tsc.tsc = tsc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_cbr(struct pt_encoder *encoder, uint8_t cbr)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_cbr;
+ packet.payload.cbr.ratio = cbr;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_tma(struct pt_encoder *encoder, uint16_t ctc, uint16_t fc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_tma;
+ packet.payload.tma.ctc = ctc;
+ packet.payload.tma.fc = fc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mtc(struct pt_encoder *encoder, uint8_t ctc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mtc;
+ packet.payload.mtc.ctc = ctc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_cyc(struct pt_encoder *encoder, uint32_t ctc)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_cyc;
+ packet.payload.cyc.value = ctc;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_stop(struct pt_encoder *encoder)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_stop;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_vmcs(struct pt_encoder *encoder, uint64_t payload)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_vmcs;
+ packet.payload.vmcs.base = payload;
+
+ return pt_enc_next(encoder, &packet);
+}
+
+int pt_encode_mnt(struct pt_encoder *encoder, uint64_t payload)
+{
+ struct pt_packet packet;
+
+ packet.type = ppt_mnt;
+ packet.payload.mnt.payload = payload;
+
+ return pt_enc_next(encoder, &packet);
+}
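
Taken together, a typical encoding session configures a buffer, allocates an encoder over it, and emits packets until one of the pt_encode_* helpers reports -pte_eos. A minimal sketch using only the public API (emit_sync() itself is hypothetical; error paths abbreviated):

	#include "intel-pt.h"

	#include <string.h>

	/* Emit a minimal PSB/PSBEND synchronization sequence into
	 * @buffer and return the number of bytes written.
	 */
	static int emit_sync(uint8_t *buffer, size_t size)
	{
		struct pt_encoder *encoder;
		struct pt_config config;
		uint64_t offset;
		int errcode;

		memset(&config, 0, sizeof(config));
		config.size = sizeof(config);
		config.begin = buffer;
		config.end = buffer + size;

		encoder = pt_alloc_encoder(&config);
		if (!encoder)
			return -pte_invalid;

		errcode = pt_encode_psb(encoder);
		if (0 <= errcode)
			errcode = pt_encode_psbend(encoder);

		if (0 <= errcode) {
			errcode = pt_enc_get_offset(encoder, &offset);
			if (0 <= errcode)
				errcode = (int) offset;
		}

		pt_free_encoder(encoder);
		return errcode;
	}
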
diff --git a/contrib/processor-trace/libipt/src/pt_error.c b/contrib/processor-trace/libipt/src/pt_error.c
new file mode 100644
index 0000000000000..c3ee81cfdba1e
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_error.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "intel-pt.h"
+
+
+const char *pt_errstr(enum pt_error_code errcode)
+{
+ switch (errcode) {
+ case pte_ok:
+ return "OK";
+
+ case pte_internal:
+ return "internal error";
+
+ case pte_invalid:
+ return "invalid argument";
+
+ case pte_nosync:
+ return "decoder out of sync";
+
+ case pte_bad_opc:
+ return "unknown opcode";
+
+ case pte_bad_packet:
+ return "unknown packet";
+
+ case pte_bad_context:
+ return "unexpected packet context";
+
+ case pte_eos:
+ return "reached end of trace stream";
+
+ case pte_bad_query:
+ return "trace stream does not match query";
+
+ case pte_nomem:
+ return "out of memory";
+
+ case pte_bad_config:
+ return "bad configuration";
+
+ case pte_noip:
+ return "no ip";
+
+ case pte_ip_suppressed:
+ return "ip has been suppressed";
+
+ case pte_nomap:
+ return "no memory mapped at this address";
+
+ case pte_bad_insn:
+ return "unknown instruction";
+
+ case pte_no_time:
+ return "no timing information";
+
+ case pte_no_cbr:
+ return "no core:bus ratio";
+
+ case pte_bad_image:
+ return "bad image";
+
+ case pte_bad_lock:
+ return "locking error";
+
+ case pte_not_supported:
+ return "not supported";
+
+ case pte_retstack_empty:
+ return "compressed return without call";
+
+ case pte_bad_retcomp:
+ return "bad compressed return";
+
+ case pte_bad_status_update:
+ return "bad status update";
+
+ case pte_no_enable:
+ return "expected tracing enabled event";
+
+ case pte_event_ignored:
+ return "event ignored";
+
+ case pte_overflow:
+ return "overflow";
+
+ case pte_bad_file:
+ return "bad file";
+
+ case pte_bad_cpu:
+ return "unknown cpu";
+ }
+
+ /* Should not reach here. */
+	return "internal error";
+}
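
Status codes travel through the library as negative -pte_* values, while pt_errstr() expects the positive enumerator; pt_errcode() from the public header performs that conversion. A typical call site as a sketch (report() is hypothetical):

	#include "intel-pt.h"

	#include <stdio.h>

	static void report(int errcode)
	{
		if (errcode < 0)
			fprintf(stderr, "error: %s\n",
				pt_errstr(pt_errcode(errcode)));
	}
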
diff --git a/contrib/processor-trace/libipt/src/pt_event_queue.c b/contrib/processor-trace/libipt/src/pt_event_queue.c
new file mode 100644
index 0000000000000..89518ea3e0419
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_event_queue.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_event_queue.h"
+
+#include <string.h>
+
+
+static inline uint8_t pt_evq_inc(uint8_t idx)
+{
+ idx += 1;
+ idx %= evq_max;
+
+ return idx;
+}
+
+static struct pt_event *pt_event_init(struct pt_event *event)
+{
+ if (event)
+ memset(event, 0, sizeof(*event));
+
+ return event;
+}
+
+void pt_evq_init(struct pt_event_queue *evq)
+{
+ if (!evq)
+ return;
+
+ memset(evq, 0, sizeof(*evq));
+}
+
+struct pt_event *pt_evq_standalone(struct pt_event_queue *evq)
+{
+ if (!evq)
+ return NULL;
+
+ return pt_event_init(&evq->standalone);
+}
+
+struct pt_event *pt_evq_enqueue(struct pt_event_queue *evq,
+ enum pt_event_binding evb)
+{
+ uint8_t begin, end, gap, idx;
+
+ if (!evq)
+ return NULL;
+
+ if (evb_max <= evb)
+ return NULL;
+
+ begin = evq->begin[evb];
+ idx = evq->end[evb];
+
+ if (evq_max <= begin)
+ return NULL;
+
+ if (evq_max <= idx)
+ return NULL;
+
+ end = pt_evq_inc(idx);
+ gap = pt_evq_inc(end);
+
+ /* Leave a gap so we don't overwrite the last dequeued event. */
+ if (begin == gap)
+ return NULL;
+
+ evq->end[evb] = end;
+
+ return pt_event_init(&evq->queue[evb][idx]);
+}
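
One free slot distinguishes a full queue from an empty one, and the extra gap protects the most recently dequeued event, which is handed out by pointer; a queue of evq_max slots therefore holds at most evq_max - 2 pending events per binding. For illustration:

	/* begin == 0, end == 0: empty
	 * enqueue fills slots 0 .. evq_max-3; end advances to evq_max-2
	 * the next enqueue would make gap == begin and returns NULL
	 */
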
+
+struct pt_event *pt_evq_dequeue(struct pt_event_queue *evq,
+ enum pt_event_binding evb)
+{
+ uint8_t begin, end;
+
+ if (!evq)
+ return NULL;
+
+ if (evb_max <= evb)
+ return NULL;
+
+ begin = evq->begin[evb];
+ end = evq->end[evb];
+
+ if (evq_max <= begin)
+ return NULL;
+
+ if (evq_max <= end)
+ return NULL;
+
+ if (begin == end)
+ return NULL;
+
+ evq->begin[evb] = pt_evq_inc(begin);
+
+ return &evq->queue[evb][begin];
+}
+
+int pt_evq_clear(struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+ if (!evq)
+ return -pte_internal;
+
+ if (evb_max <= evb)
+ return -pte_internal;
+
+ evq->begin[evb] = 0;
+ evq->end[evb] = 0;
+
+ return 0;
+}
+
+int pt_evq_empty(const struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+ uint8_t begin, end;
+
+ if (!evq)
+ return -pte_internal;
+
+ if (evb_max <= evb)
+ return -pte_internal;
+
+ begin = evq->begin[evb];
+ end = evq->end[evb];
+
+ if (evq_max <= begin)
+ return -pte_internal;
+
+ if (evq_max <= end)
+ return -pte_internal;
+
+ return begin == end;
+}
+
+int pt_evq_pending(const struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_empty(evq, evb);
+ if (errcode < 0)
+ return errcode;
+
+ return !errcode;
+}
+
+struct pt_event *pt_evq_find(struct pt_event_queue *evq,
+ enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ uint8_t begin, end;
+
+ if (!evq)
+ return NULL;
+
+ if (evb_max <= evb)
+ return NULL;
+
+ begin = evq->begin[evb];
+ end = evq->end[evb];
+
+ if (evq_max <= begin)
+ return NULL;
+
+ if (evq_max <= end)
+ return NULL;
+
+ for (; begin != end; begin = pt_evq_inc(begin)) {
+ struct pt_event *ev;
+
+ ev = &evq->queue[evb][begin];
+ if (ev->type == evt)
+ return ev;
+ }
+
+ return NULL;
+}
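
A typical producer/consumer round trip over one binding, as a sketch (queue_roundtrip() is hypothetical; the evb_tip binding and ptev_async_branch event type are assumed from the headers, and event payload fields are elided):

	#include "pt_event_queue.h"

	static int queue_roundtrip(struct pt_event_queue *evq)
	{
		struct pt_event *ev;

		/* Queue an event to be bound to the next TIP packet. */
		ev = pt_evq_enqueue(evq, evb_tip);
		if (!ev)
			return -pte_nomem;

		ev->type = ptev_async_branch;

		/* ...later, when that TIP packet is decoded... */
		ev = pt_evq_dequeue(evq, evb_tip);
		if (!ev)
			return -pte_internal;

		return ev->type == ptev_async_branch ? 0 : -pte_internal;
	}
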
diff --git a/contrib/processor-trace/libipt/src/pt_ild.c b/contrib/processor-trace/libipt/src/pt_ild.c
new file mode 100644
index 0000000000000..a8d78d4102e4a
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_ild.c
@@ -0,0 +1,1223 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_ild.h"
+#include "pti-imm-defs.h"
+#include "pti-imm.h"
+#include "pti-modrm-defs.h"
+#include "pti-modrm.h"
+#include "pti-disp-defs.h"
+#include "pti-disp.h"
+
+#include <string.h>
+
+/* SET UP 3 TABLES */
+
+static uint8_t has_disp_regular[4][4][8];
+
+static void init_has_disp_regular_table(void)
+{
+ uint8_t mod, rm;
+
+ memset(has_disp_regular, 0, sizeof(has_disp_regular));
+
+	/* fill eamode16 */
+ has_disp_regular[ptem_16bit][0][6] = 2;
+ for (rm = 0; rm < 8; rm++)
+ for (mod = 1; mod <= 2; mod++)
+ has_disp_regular[ptem_16bit][mod][rm] = mod;
+
+	/* fill eamode32/64 */
+ has_disp_regular[ptem_32bit][0][5] = 4;
+ has_disp_regular[ptem_64bit][0][5] = 4;
+ for (rm = 0; rm < 8; rm++) {
+ has_disp_regular[ptem_32bit][1][rm] = 1;
+ has_disp_regular[ptem_32bit][2][rm] = 4;
+
+ has_disp_regular[ptem_64bit][1][rm] = 1;
+ has_disp_regular[ptem_64bit][2][rm] = 4;
+ }
+}
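
The table encodes the standard ModRM displacement rules. For illustration (values established by the initializer above):

	/* has_disp_regular[ptem_32bit][0][5] == 4  (disp32, no base)
	 * has_disp_regular[ptem_32bit][1][3] == 1  ([ebx] + disp8)
	 * has_disp_regular[ptem_16bit][0][6] == 2  (disp16, no base)
	 */
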
+
+static uint8_t eamode_table[2][4];
+
+static void init_eamode_table(void)
+{
+ eamode_table[0][ptem_unknown] = ptem_unknown;
+ eamode_table[0][ptem_16bit] = ptem_16bit;
+ eamode_table[0][ptem_32bit] = ptem_32bit;
+ eamode_table[0][ptem_64bit] = ptem_64bit;
+
+ eamode_table[1][ptem_unknown] = ptem_unknown;
+ eamode_table[1][ptem_16bit] = ptem_32bit;
+ eamode_table[1][ptem_32bit] = ptem_16bit;
+ eamode_table[1][ptem_64bit] = ptem_32bit;
+}
+
+static uint8_t has_sib_table[4][4][8];
+
+static void init_has_sib_table(void)
+{
+ uint8_t mod;
+
+ memset(has_sib_table, 0, sizeof(has_sib_table));
+
+	/* for eamode32/64 there is a SIB byte for mod != 3 and rm == 4 */
+ for (mod = 0; mod <= 2; mod++) {
+ has_sib_table[ptem_32bit][mod][4] = 1;
+ has_sib_table[ptem_64bit][mod][4] = 1;
+ }
+}
+
+/* SOME ACCESSORS */
+
+static inline uint8_t get_byte(const struct pt_ild *ild, uint8_t i)
+{
+ return ild->itext[i];
+}
+
+static inline uint8_t const *get_byte_ptr(const struct pt_ild *ild, uint8_t i)
+{
+ return ild->itext + i;
+}
+
+static inline int mode_64b(const struct pt_ild *ild)
+{
+ return ild->mode == ptem_64bit;
+}
+
+static inline int mode_32b(const struct pt_ild *ild)
+{
+ return ild->mode == ptem_32bit;
+}
+
+static inline int bits_match(uint8_t x, uint8_t mask, uint8_t target)
+{
+ return (x & mask) == target;
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_eosz_non64(const struct pt_ild *ild)
+{
+ if (mode_32b(ild)) {
+ if (ild->u.s.osz)
+ return ptem_16bit;
+ return ptem_32bit;
+ }
+ if (ild->u.s.osz)
+ return ptem_32bit;
+ return ptem_16bit;
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_eosz(const struct pt_ild *ild)
+{
+ if (mode_64b(ild)) {
+ if (ild->u.s.rex_w)
+ return ptem_64bit;
+ if (ild->u.s.osz)
+ return ptem_16bit;
+ return ptem_32bit;
+ }
+ return pti_get_nominal_eosz_non64(ild);
+}
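
The effective operand size thus follows the usual precedence: REX.W beats the 0x66 prefix, which beats the mode default. In 64-bit mode, for illustration:

	/* rex_w == 1            -> ptem_64bit (0x66, if any, ignored)
	 * rex_w == 0, osz == 1  -> ptem_16bit
	 * rex_w == 0, osz == 0  -> ptem_32bit (default)
	 */
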
+
+static inline enum pt_exec_mode
+pti_get_nominal_eosz_df64(const struct pt_ild *ild)
+{
+ if (mode_64b(ild)) {
+ if (ild->u.s.rex_w)
+ return ptem_64bit;
+ if (ild->u.s.osz)
+ return ptem_16bit;
+		/* Only this next line differs from
+		 * pti_get_nominal_eosz(), above.
+		 */
+ return ptem_64bit;
+ }
+ return pti_get_nominal_eosz_non64(ild);
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_easz_non64(const struct pt_ild *ild)
+{
+ if (mode_32b(ild)) {
+ if (ild->u.s.asz)
+ return ptem_16bit;
+ return ptem_32bit;
+ }
+ if (ild->u.s.asz)
+ return ptem_32bit;
+ return ptem_16bit;
+}
+
+static inline enum pt_exec_mode
+pti_get_nominal_easz(const struct pt_ild *ild)
+{
+ if (mode_64b(ild)) {
+ if (ild->u.s.asz)
+ return ptem_32bit;
+ return ptem_64bit;
+ }
+ return pti_get_nominal_easz_non64(ild);
+}
+
+static inline int resolve_z(uint8_t *pbytes, enum pt_exec_mode eosz)
+{
+ static const uint8_t bytes[] = { 2, 4, 4 };
+ unsigned int idx;
+
+ if (!pbytes)
+ return -pte_internal;
+
+ idx = (unsigned int) eosz - 1;
+ if (sizeof(bytes) <= idx)
+ return -pte_bad_insn;
+
+ *pbytes = bytes[idx];
+ return 0;
+}
+
+static inline int resolve_v(uint8_t *pbytes, enum pt_exec_mode eosz)
+{
+ static const uint8_t bytes[] = { 2, 4, 8 };
+ unsigned int idx;
+
+ if (!pbytes)
+ return -pte_internal;
+
+ idx = (unsigned int) eosz - 1;
+ if (sizeof(bytes) <= idx)
+ return -pte_bad_insn;
+
+ *pbytes = bytes[idx];
+ return 0;
+}
+
+/* DECODERS */
+
+static int set_imm_bytes(struct pt_ild *ild)
+{
+	/* Set ild->imm1_bytes and ild->imm2_bytes for maps 0/1. */
+ static uint8_t const *const map_map[] = {
+ /* map 0 */ imm_bytes_map_0x0,
+ /* map 1 */ imm_bytes_map_0x0F
+ };
+ uint8_t map, imm_code;
+
+ if (!ild)
+ return -pte_internal;
+
+ map = ild->map;
+
+ if ((sizeof(map_map) / sizeof(*map_map)) <= map)
+ return 0;
+
+ imm_code = map_map[map][ild->nominal_opcode];
+ switch (imm_code) {
+ case PTI_IMM_NONE:
+ case PTI_0_IMM_WIDTH_CONST_l2:
+ default:
+ return 0;
+
+ case PTI_UIMM8_IMM_WIDTH_CONST_l2:
+ ild->imm1_bytes = 1;
+ return 0;
+
+ case PTI_SIMM8_IMM_WIDTH_CONST_l2:
+ ild->imm1_bytes = 1;
+ return 0;
+
+ case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
+ /* SIMMz(eosz) */
+ return resolve_z(&ild->imm1_bytes, pti_get_nominal_eosz(ild));
+
+ case PTI_UIMMv_IMM_WIDTH_OSZ_NONTERM_EOSZ_l2:
+ /* UIMMv(eosz) */
+ return resolve_v(&ild->imm1_bytes, pti_get_nominal_eosz(ild));
+
+ case PTI_UIMM16_IMM_WIDTH_CONST_l2:
+ ild->imm1_bytes = 2;
+ return 0;
+
+ case PTI_SIMMz_IMM_WIDTH_OSZ_NONTERM_DF64_EOSZ_l2:
+ /* push defaults to eosz64 in 64b mode, then uses SIMMz */
+ return resolve_z(&ild->imm1_bytes,
+ pti_get_nominal_eosz_df64(ild));
+
+ case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf7_l1:
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2) {
+ return resolve_z(&ild->imm1_bytes,
+ pti_get_nominal_eosz(ild));
+ }
+ return 0;
+
+ case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xc7_l1:
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 0) {
+ return resolve_z(&ild->imm1_bytes,
+ pti_get_nominal_eosz(ild));
+ }
+ return 0;
+
+ case PTI_RESOLVE_BYREG_IMM_WIDTH_map0x0_op0xf6_l1:
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) < 2)
+ ild->imm1_bytes = 1;
+
+ return 0;
+
+ case PTI_IMM_hasimm_map0x0_op0xc8_l1:
+ if (ild->map == PTI_MAP_0) {
+			/* enter -> imm1=2, imm2=1 */
+ ild->imm1_bytes = 2;
+ ild->imm2_bytes = 1;
+ }
+ return 0;
+
+ case PTI_IMM_hasimm_map0x0F_op0x78_l1:
+ /* AMD SSE4a (insertq/extrq use osz/f2) vs vmread
+ * (no prefixes)
+ */
+ if (ild->map == PTI_MAP_1) {
+ if (ild->u.s.osz || ild->u.s.last_f2f3 == 2) {
+ ild->imm1_bytes = 1;
+ ild->imm2_bytes = 1;
+ }
+ }
+ return 0;
+ }
+}
+
+static int imm_dec(struct pt_ild *ild, uint8_t length)
+{
+ int errcode;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->map == PTI_MAP_AMD3DNOW) {
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ ild->nominal_opcode = get_byte(ild, length);
+ return length + 1;
+ }
+
+ errcode = set_imm_bytes(ild);
+ if (errcode < 0)
+ return errcode;
+
+ length += ild->imm1_bytes;
+ length += ild->imm2_bytes;
+ if (ild->max_bytes < length)
+ return -pte_bad_insn;
+
+ return length;
+}
+
+static int compute_disp_dec(struct pt_ild *ild)
+{
+ /* set ild->disp_bytes for maps 0 and 1. */
+ static uint8_t const *const map_map[] = {
+ /* map 0 */ disp_bytes_map_0x0,
+ /* map 1 */ disp_bytes_map_0x0F
+ };
+ uint8_t map, disp_kind;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (0 < ild->disp_bytes)
+ return 0;
+
+ map = ild->map;
+
+ if ((sizeof(map_map) / sizeof(*map_map)) <= map)
+ return 0;
+
+ disp_kind = map_map[map][ild->nominal_opcode];
+ switch (disp_kind) {
+ case PTI_DISP_NONE:
+ ild->disp_bytes = 0;
+ return 0;
+
+ case PTI_PRESERVE_DEFAULT:
+ /* nothing to do */
+ return 0;
+
+ case PTI_BRDISP8:
+ ild->disp_bytes = 1;
+ return 0;
+
+ case PTI_DISP_BUCKET_0_l1:
+ /* BRDISPz(eosz) for 16/32 modes, and BRDISP32 for 64b mode */
+ if (mode_64b(ild)) {
+ ild->disp_bytes = 4;
+ return 0;
+ }
+
+ return resolve_z(&ild->disp_bytes,
+ pti_get_nominal_eosz(ild));
+
+ case PTI_MEMDISPv_DISP_WIDTH_ASZ_NONTERM_EASZ_l2:
+ /* MEMDISPv(easz) */
+ return resolve_v(&ild->disp_bytes, pti_get_nominal_easz(ild));
+
+ case PTI_BRDISPz_BRDISP_WIDTH_OSZ_NONTERM_EOSZ_l2:
+ /* BRDISPz(eosz) for 16/32/64 modes */
+ return resolve_z(&ild->disp_bytes, pti_get_nominal_eosz(ild));
+
+ case PTI_RESOLVE_BYREG_DISP_map0x0_op0xc7_l1:
+ /* reg=0 -> preserve, reg=7 -> BRDISPz(eosz) */
+ if (ild->map == PTI_MAP_0 && pti_get_modrm_reg(ild) == 7) {
+ return resolve_z(&ild->disp_bytes,
+ pti_get_nominal_eosz(ild));
+ }
+ return 0;
+
+ default:
+ return -pte_bad_insn;
+ }
+}
+
+static int disp_dec(struct pt_ild *ild, uint8_t length)
+{
+ uint8_t disp_bytes;
+ int errcode;
+
+ if (!ild)
+ return -pte_internal;
+
+ errcode = compute_disp_dec(ild);
+ if (errcode < 0)
+ return errcode;
+
+ disp_bytes = ild->disp_bytes;
+ if (disp_bytes == 0)
+ return imm_dec(ild, length);
+
+ if (length + disp_bytes > ild->max_bytes)
+ return -pte_bad_insn;
+
+	/* Record only the position; we must be able to re-read the itext
+	 * bytes for the actual value (SMC/CMC issue).
+	 */
+ ild->disp_pos = length;
+
+ return imm_dec(ild, length + disp_bytes);
+}
+
+static int sib_dec(struct pt_ild *ild, uint8_t length)
+{
+ uint8_t sib;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ sib = get_byte(ild, length);
+ if ((sib & 0x07) == 0x05 && pti_get_modrm_mod(ild) == 0)
+ ild->disp_bytes = 4;
+
+ return disp_dec(ild, length + 1);
+}
+
+static int modrm_dec(struct pt_ild *ild, uint8_t length)
+{
+ static uint8_t const *const has_modrm_2d[2] = {
+ has_modrm_map_0x0,
+ has_modrm_map_0x0F
+ };
+ int has_modrm = PTI_MODRM_FALSE;
+ pti_map_enum_t map;
+
+ if (!ild)
+ return -pte_internal;
+
+ map = pti_get_map(ild);
+ if (map >= PTI_MAP_2)
+ has_modrm = PTI_MODRM_TRUE;
+ else
+ has_modrm = has_modrm_2d[map][ild->nominal_opcode];
+
+ if (has_modrm == PTI_MODRM_FALSE || has_modrm == PTI_MODRM_UNDEF)
+ return disp_dec(ild, length);
+
+ /* really >= here because we have not eaten the byte yet */
+ if (length >= ild->max_bytes)
+ return -pte_bad_insn;
+
+ ild->modrm_byte = get_byte(ild, length);
+
+ if (has_modrm != PTI_MODRM_IGNORE_MOD) {
+ /* set disp_bytes and sib using simple tables */
+
+ uint8_t eamode = eamode_table[ild->u.s.asz][ild->mode];
+ uint8_t mod = (uint8_t) pti_get_modrm_mod(ild);
+ uint8_t rm = (uint8_t) pti_get_modrm_rm(ild);
+ uint8_t has_sib;
+
+ ild->disp_bytes = has_disp_regular[eamode][mod][rm];
+
+ has_sib = has_sib_table[eamode][mod][rm];
+ if (has_sib)
+ return sib_dec(ild, length + 1);
+ }
+
+ return disp_dec(ild, length + 1);
+}
+
+static inline int get_next_as_opcode(struct pt_ild *ild, uint8_t length)
+{
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ ild->nominal_opcode = get_byte(ild, length);
+
+ return modrm_dec(ild, length + 1);
+}
+
+static int opcode_dec(struct pt_ild *ild, uint8_t length)
+{
+ uint8_t b, m;
+
+ if (!ild)
+ return -pte_internal;
+
+	/* No need to check max_bytes - it was checked in previous scanners. */
+ b = get_byte(ild, length);
+ if (b != 0x0F) { /* 1B opcodes, map 0 */
+ ild->map = PTI_MAP_0;
+ ild->nominal_opcode = b;
+
+ return modrm_dec(ild, length + 1);
+ }
+
+ length++; /* eat the 0x0F */
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ /* 0x0F opcodes MAPS 1,2,3 */
+ m = get_byte(ild, length);
+ if (m == 0x38) {
+ ild->map = PTI_MAP_2;
+
+ return get_next_as_opcode(ild, length + 1);
+ } else if (m == 0x3A) {
+ ild->map = PTI_MAP_3;
+ ild->imm1_bytes = 1;
+
+ return get_next_as_opcode(ild, length + 1);
+ } else if (bits_match(m, 0xf8, 0x38)) {
+ ild->map = PTI_MAP_INVALID;
+
+ return get_next_as_opcode(ild, length + 1);
+ } else if (m == 0x0F) { /* 3dNow */
+ ild->map = PTI_MAP_AMD3DNOW;
+ ild->imm1_bytes = 1;
+ /* real opcode is in immediate later on, but we need an
+ * opcode now. */
+ ild->nominal_opcode = 0x0F;
+
+ return modrm_dec(ild, length + 1);
+ } else { /* map 1 (simple two byte opcodes) */
+ ild->nominal_opcode = m;
+ ild->map = PTI_MAP_1;
+
+ return modrm_dec(ild, length + 1);
+ }
+}
+
+typedef int (*prefix_decoder)(struct pt_ild *ild, uint8_t length, uint8_t rex);
+static prefix_decoder prefix_table[256];
+
+static inline int prefix_decode(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t byte;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (ild->max_bytes <= length)
+ return -pte_bad_insn;
+
+ byte = get_byte(ild, length);
+
+ return prefix_table[byte](ild, length, rex);
+}
+
+static inline int prefix_next(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ return prefix_decode(ild, length + 1, rex);
+}
+
+static int prefix_osz(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.osz = 1;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_asz(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.asz = 1;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_lock(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.lock = 1;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_f2(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.f2 = 1;
+ ild->u.s.last_f2f3 = 2;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_f3(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.s.f3 = 1;
+ ild->u.s.last_f2f3 = 3;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_ignore(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ return prefix_next(ild, length, 0);
+}
+
+static int prefix_done(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ if (!ild)
+ return -pte_internal;
+
+ if (rex & 0x04)
+ ild->u.s.rex_r = 1;
+ if (rex & 0x08)
+ ild->u.s.rex_w = 1;
+
+ return opcode_dec(ild, length);
+}
+
+static int prefix_rex(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ if (mode_64b(ild))
+ return prefix_next(ild, length, get_byte(ild, length));
+ else
+ return opcode_dec(ild, length);
+}
+
+static inline int prefix_vex_done(struct pt_ild *ild, uint8_t length)
+{
+ if (!ild)
+ return -pte_internal;
+
+ ild->nominal_opcode = get_byte(ild, length);
+
+ return modrm_dec(ild, length + 1);
+}
+
+static int prefix_vex_c5(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t max_bytes;
+ uint8_t p1;
+
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ max_bytes = ild->max_bytes;
+
+ /* Read the next byte to validate that this is indeed VEX. */
+ if (max_bytes <= (length + 1))
+ return -pte_bad_insn;
+
+ p1 = get_byte(ild, length + 1);
+
+ /* If p1[7:6] is not 11b in non-64-bit mode, this is LDS, not VEX. */
+ if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
+ return opcode_dec(ild, length);
+
+ /* We need at least 3 bytes
+ * - 2 for the VEX prefix and payload and
+ * - 1 for the opcode.
+ */
+ if (max_bytes < (length + 3))
+ return -pte_bad_insn;
+
+ ild->u.s.vex = 1;
+ if (p1 & 0x80)
+ ild->u.s.rex_r = 1;
+
+ ild->map = PTI_MAP_1;
+
+ /* Eat the VEX. */
+ length += 2;
+ return prefix_vex_done(ild, length);
+}
+
+static int prefix_vex_c4(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t max_bytes;
+ uint8_t p1, p2, map;
+
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ max_bytes = ild->max_bytes;
+
+ /* Read the next byte to validate that this is indeed VEX. */
+ if (max_bytes <= (length + 1))
+ return -pte_bad_insn;
+
+ p1 = get_byte(ild, length + 1);
+
+ /* If p1[7:6] is not 11b in non-64-bit mode, this is LES, not VEX. */
+ if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
+ return opcode_dec(ild, length);
+
+ /* We need at least 4 bytes
+ * - 3 for the VEX prefix and payload and
+ * - 1 for the opcode.
+ */
+ if (max_bytes < (length + 4))
+ return -pte_bad_insn;
+
+ p2 = get_byte(ild, length + 2);
+
+ ild->u.s.vex = 1;
+ if (p1 & 0x80)
+ ild->u.s.rex_r = 1;
+ if (p2 & 0x80)
+ ild->u.s.rex_w = 1;
+
+ map = p1 & 0x1f;
+ if (PTI_MAP_INVALID <= map)
+ return -pte_bad_insn;
+
+ ild->map = map;
+ if (map == PTI_MAP_3)
+ ild->imm1_bytes = 1;
+
+ /* Eat the VEX. */
+ length += 3;
+ return prefix_vex_done(ild, length);
+}
+
+static int prefix_evex(struct pt_ild *ild, uint8_t length, uint8_t rex)
+{
+ uint8_t max_bytes;
+ uint8_t p1, p2, map;
+
+ (void) rex;
+
+ if (!ild)
+ return -pte_internal;
+
+ max_bytes = ild->max_bytes;
+
+ /* Read the next byte to validate that this is indeed EVEX. */
+ if (max_bytes <= (length + 1))
+ return -pte_bad_insn;
+
+ p1 = get_byte(ild, length + 1);
+
+ /* If p1[7:6] is not 11b in non-64-bit mode, this is BOUND, not EVEX. */
+ if (!mode_64b(ild) && !bits_match(p1, 0xc0, 0xc0))
+ return opcode_dec(ild, length);
+
+ /* We need at least 5 bytes
+ * - 4 for the EVEX prefix and payload and
+ * - 1 for the opcode.
+ */
+ if (max_bytes < (length + 5))
+ return -pte_bad_insn;
+
+ p2 = get_byte(ild, length + 2);
+
+ ild->u.s.vex = 1;
+ if (p1 & 0x80)
+ ild->u.s.rex_r = 1;
+ if (p2 & 0x80)
+ ild->u.s.rex_w = 1;
+
+ map = p1 & 0x03;
+ ild->map = map;
+
+ if (map == PTI_MAP_3)
+ ild->imm1_bytes = 1;
+
+ /* Eat the EVEX. */
+ length += 4;
+ return prefix_vex_done(ild, length);
+}
+
+static void init_prefix_table(void)
+{
+ unsigned int byte;
+
+ for (byte = 0; byte <= 0xff; ++byte)
+ prefix_table[byte] = prefix_done;
+
+ prefix_table[0x66] = prefix_osz;
+ prefix_table[0x67] = prefix_asz;
+
+ /* Segment prefixes. */
+ prefix_table[0x2e] = prefix_ignore;
+ prefix_table[0x3e] = prefix_ignore;
+ prefix_table[0x26] = prefix_ignore;
+ prefix_table[0x36] = prefix_ignore;
+ prefix_table[0x64] = prefix_ignore;
+ prefix_table[0x65] = prefix_ignore;
+
+ prefix_table[0xf0] = prefix_lock;
+ prefix_table[0xf2] = prefix_f2;
+ prefix_table[0xf3] = prefix_f3;
+
+ for (byte = 0x40; byte <= 0x4f; ++byte)
+ prefix_table[byte] = prefix_rex;
+
+ prefix_table[0xc4] = prefix_vex_c4;
+ prefix_table[0xc5] = prefix_vex_c5;
+ prefix_table[0x62] = prefix_evex;
+}
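+
+/* An illustrative trace of the table set up by init_prefix_table()
+ * (comment only, not part of the library): the table turns prefix decode
+ * into a byte-wise dispatch. For 66 0f ae ..., byte 0x66 dispatches to
+ * prefix_osz(), which consumes it via prefix_next(); byte 0x0f is not
+ * overridden in the table, so it dispatches to prefix_done(), which
+ * latches the REX bits seen so far and enters opcode_dec().
+ */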
+
+static int decode(struct pt_ild *ild)
+{
+ return prefix_decode(ild, 0, 0);
+}
+
+static int set_branch_target(struct pt_insn_ext *iext, const struct pt_ild *ild)
+{
+ if (!iext || !ild)
+ return -pte_internal;
+
+ iext->variant.branch.is_direct = 1;
+
+ if (ild->disp_bytes == 1) {
+ const int8_t *b = (const int8_t *)
+ get_byte_ptr(ild, ild->disp_pos);
+
+ iext->variant.branch.displacement = *b;
+ } else if (ild->disp_bytes == 2) {
+ const int16_t *w = (const int16_t *)
+ get_byte_ptr(ild, ild->disp_pos);
+
+ iext->variant.branch.displacement = *w;
+ } else if (ild->disp_bytes == 4) {
+ const int32_t *d = (const int32_t *)
+ get_byte_ptr(ild, ild->disp_pos);
+
+ iext->variant.branch.displacement = *d;
+ } else
+ return -pte_bad_insn;
+
+ return 0;
+}
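+
+/* An illustrative example for set_branch_target() (comment only): for the
+ * two-byte instruction eb 05 (JMP rel8), disp_bytes is 1 and the
+ * displacement byte is sign-extended, yielding a displacement of 5; a
+ * displacement byte of 0xfb would yield -5.
+ */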
+
+/* MAIN ENTRY POINTS */
+
+void pt_ild_init(void)
+{ /* initialization */
+ init_has_disp_regular_table();
+ init_has_sib_table();
+ init_eamode_table();
+ init_prefix_table();
+}
+
+static int pt_instruction_length_decode(struct pt_ild *ild)
+{
+ if (!ild)
+ return -pte_internal;
+
+ ild->u.i = 0;
+ ild->imm1_bytes = 0;
+ ild->imm2_bytes = 0;
+ ild->disp_bytes = 0;
+ ild->modrm_byte = 0;
+ ild->map = PTI_MAP_INVALID;
+
+ if (!ild->mode)
+ return -pte_bad_insn;
+
+ return decode(ild);
+}
+
+static int pt_instruction_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+ const struct pt_ild *ild)
+{
+ uint8_t opcode, map;
+
+ if (!iext || !ild)
+ return -pte_internal;
+
+ iext->iclass = PTI_INST_INVALID;
+ memset(&iext->variant, 0, sizeof(iext->variant));
+
+ insn->iclass = ptic_other;
+
+ opcode = ild->nominal_opcode;
+ map = ild->map;
+
+ if (map > PTI_MAP_1)
+ return 0; /* uninteresting */
+ if (ild->u.s.vex)
+ return 0; /* uninteresting */
+
+ /* PTI_INST_JCC: 0x70..0x7f in map 0; 0x0f-escaped 0x80..0x8f in map 1. */
+ if (opcode >= 0x70 && opcode <= 0x7F) {
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_JCC;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+ }
+ if (opcode >= 0x80 && opcode <= 0x8F) {
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_JCC;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+ }
+
+ switch (opcode) {
+ case 0x9A:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_CALL_9A;
+ }
+ return 0;
+
+ case 0xFF:
+ if (map == PTI_MAP_0) {
+ uint8_t reg = pti_get_modrm_reg(ild);
+
+ if (reg == 2) {
+ insn->iclass = ptic_call;
+ iext->iclass = PTI_INST_CALL_FFr2;
+ } else if (reg == 3) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_CALL_FFr3;
+ } else if (reg == 4) {
+ insn->iclass = ptic_jump;
+ iext->iclass = PTI_INST_JMP_FFr4;
+ } else if (reg == 5) {
+ insn->iclass = ptic_far_jump;
+ iext->iclass = PTI_INST_JMP_FFr5;
+ }
+ }
+ return 0;
+
+ case 0xE8:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_call;
+ iext->iclass = PTI_INST_CALL_E8;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xCD:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INT;
+ }
+
+ return 0;
+
+ case 0xCC:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INT3;
+ }
+
+ return 0;
+
+ case 0xCE:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INTO;
+ }
+
+ return 0;
+
+ case 0xF1:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_INT1;
+ }
+
+ return 0;
+
+ case 0xCF:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_IRET;
+ }
+ return 0;
+
+ case 0xE9:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_jump;
+ iext->iclass = PTI_INST_JMP_E9;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xEA:
+ if (map == PTI_MAP_0) {
+ /* Far jumps are treated as indirect jumps. */
+ insn->iclass = ptic_far_jump;
+ iext->iclass = PTI_INST_JMP_EA;
+ }
+ return 0;
+
+ case 0xEB:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_jump;
+ iext->iclass = PTI_INST_JMP_EB;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE3:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_JrCXZ;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE0:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_LOOPNE;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE1:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_LOOPE;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0xE2:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_cond_jump;
+ iext->iclass = PTI_INST_LOOP;
+
+ return set_branch_target(iext, ild);
+ }
+ return 0;
+
+ case 0x22:
+ if (map == PTI_MAP_1)
+ if (pti_get_modrm_reg(ild) == 3)
+ if (!ild->u.s.rex_r)
+ iext->iclass = PTI_INST_MOV_CR3;
+
+ return 0;
+
+ case 0xC3:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_return;
+ iext->iclass = PTI_INST_RET_C3;
+ }
+ return 0;
+
+ case 0xC2:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_return;
+ iext->iclass = PTI_INST_RET_C2;
+ }
+ return 0;
+
+ case 0xCB:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_RET_CB;
+ }
+ return 0;
+
+ case 0xCA:
+ if (map == PTI_MAP_0) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_RET_CA;
+ }
+ return 0;
+
+ case 0x05:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_SYSCALL;
+ }
+ return 0;
+
+ case 0x34:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_SYSENTER;
+ }
+ return 0;
+
+ case 0x35:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_SYSEXIT;
+ }
+ return 0;
+
+ case 0x07:
+ if (map == PTI_MAP_1) {
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_SYSRET;
+ }
+ return 0;
+
+ case 0x01:
+ if (map == PTI_MAP_1) {
+ switch (ild->modrm_byte) {
+ case 0xc1:
+ insn->iclass = ptic_far_call;
+ iext->iclass = PTI_INST_VMCALL;
+ break;
+
+ case 0xc2:
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_VMLAUNCH;
+ break;
+
+ case 0xc3:
+ insn->iclass = ptic_far_return;
+ iext->iclass = PTI_INST_VMRESUME;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 0;
+
+ case 0xc7:
+ if (map == PTI_MAP_1 &&
+ pti_get_modrm_mod(ild) != 3 &&
+ pti_get_modrm_reg(ild) == 6)
+ iext->iclass = PTI_INST_VMPTRLD;
+
+ return 0;
+
+ case 0xae:
+ if (map == PTI_MAP_1 && ild->u.s.f3 && !ild->u.s.osz &&
+ pti_get_modrm_reg(ild) == 4) {
+ insn->iclass = ptic_ptwrite;
+ iext->iclass = PTI_INST_PTWRITE;
+ }
+ return 0;
+
+ default:
+ return 0;
+ }
+}
+
+int pt_ild_decode(struct pt_insn *insn, struct pt_insn_ext *iext)
+{
+ struct pt_ild ild;
+ int size;
+
+ if (!insn || !iext)
+ return -pte_internal;
+
+ ild.mode = insn->mode;
+ ild.itext = insn->raw;
+ ild.max_bytes = insn->size;
+
+ size = pt_instruction_length_decode(&ild);
+ if (size < 0)
+ return size;
+
+ insn->size = (uint8_t) size;
+
+ return pt_instruction_decode(insn, iext, &ild);
+}
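+
+/* A minimal usage sketch for pt_ild_decode() (illustrative, not part of
+ * the library): the caller fills in the execution mode and the raw
+ * instruction bytes; decode determines size and class. Assumes
+ * pt_ild_init() has been called once before.
+ *
+ *    struct pt_insn insn;
+ *    struct pt_insn_ext iext;
+ *
+ *    memset(&insn, 0, sizeof(insn));
+ *    insn.mode = ptem_64bit;
+ *    insn.raw[0] = 0xc3;                    // RET
+ *    insn.size = 1;
+ *
+ *    if (!pt_ild_decode(&insn, &iext))
+ *        ;   // insn.size == 1, insn.iclass == ptic_return
+ */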
diff --git a/contrib/processor-trace/libipt/src/pt_image.c b/contrib/processor-trace/libipt/src/pt_image.c
new file mode 100644
index 0000000000000..b22c62601a56d
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_image.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image.h"
+#include "pt_section.h"
+#include "pt_asid.h"
+#include "pt_image_section_cache.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ return strcpy(dup, str);
+}
+
+static struct pt_section_list *pt_mk_section_list(struct pt_section *section,
+ const struct pt_asid *asid,
+ uint64_t vaddr,
+ uint64_t offset,
+ uint64_t size, int isid)
+{
+ struct pt_section_list *list;
+ int errcode;
+
+ list = malloc(sizeof(*list));
+ if (!list)
+ return NULL;
+
+ memset(list, 0, sizeof(*list));
+
+ errcode = pt_section_get(section);
+ if (errcode < 0)
+ goto out_mem;
+
+ pt_msec_init(&list->section, section, asid, vaddr, offset, size);
+ list->isid = isid;
+
+ return list;
+
+out_mem:
+ free(list);
+ return NULL;
+}
+
+static void pt_section_list_free(struct pt_section_list *list)
+{
+ if (!list)
+ return;
+
+ pt_section_put(list->section.section);
+ pt_msec_fini(&list->section);
+ free(list);
+}
+
+static void pt_section_list_free_tail(struct pt_section_list *list)
+{
+ while (list) {
+ struct pt_section_list *trash;
+
+ trash = list;
+ list = list->next;
+
+ pt_section_list_free(trash);
+ }
+}
+
+void pt_image_init(struct pt_image *image, const char *name)
+{
+ if (!image)
+ return;
+
+ memset(image, 0, sizeof(*image));
+
+ image->name = dupstr(name);
+}
+
+void pt_image_fini(struct pt_image *image)
+{
+ if (!image)
+ return;
+
+ pt_section_list_free_tail(image->sections);
+ free(image->name);
+
+ memset(image, 0, sizeof(*image));
+}
+
+struct pt_image *pt_image_alloc(const char *name)
+{
+ struct pt_image *image;
+
+ image = malloc(sizeof(*image));
+ if (image)
+ pt_image_init(image, name);
+
+ return image;
+}
+
+void pt_image_free(struct pt_image *image)
+{
+ pt_image_fini(image);
+ free(image);
+}
+
+const char *pt_image_name(const struct pt_image *image)
+{
+ if (!image)
+ return NULL;
+
+ return image->name;
+}
+
+int pt_image_add(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr, int isid)
+{
+ struct pt_section_list **list, *next, *removed, *new;
+ uint64_t size, begin, end;
+ int errcode;
+
+ if (!image || !section)
+ return -pte_internal;
+
+ size = pt_section_size(section);
+ begin = vaddr;
+ end = begin + size;
+
+ next = pt_mk_section_list(section, asid, begin, 0ull, size, isid);
+ if (!next)
+ return -pte_nomem;
+
+ removed = NULL;
+ errcode = 0;
+
+ /* Check for overlaps while we move to the end of the list. */
+ list = &(image->sections);
+ while (*list) {
+ const struct pt_mapped_section *msec;
+ const struct pt_asid *masid;
+ struct pt_section_list *current;
+ struct pt_section *lsec;
+ uint64_t lbegin, lend, loff;
+
+ current = *list;
+ msec = &current->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, asid);
+ if (errcode < 0)
+ break;
+
+ if (!errcode) {
+ list = &((*list)->next);
+ continue;
+ }
+
+ lbegin = pt_msec_begin(msec);
+ lend = pt_msec_end(msec);
+
+ if ((end <= lbegin) || (lend <= begin)) {
+ list = &((*list)->next);
+ continue;
+ }
+
+ /* The new section overlaps with @msec's section. */
+ lsec = pt_msec_section(msec);
+ loff = pt_msec_offset(msec);
+
+ /* We remove @msec and insert new sections for the remaining
+ * parts, if any. Those new sections are not mapped initially
+ * and need to be added to the end of the section list.
+ */
+ *list = current->next;
+
+ /* Keep a list of removed sections so we can re-add them in case
+ * of errors.
+ */
+ current->next = removed;
+ removed = current;
+
+ /* Add a section covering the remaining bytes at the front. */
+ if (lbegin < begin) {
+ new = pt_mk_section_list(lsec, masid, lbegin, loff,
+ begin - lbegin, current->isid);
+ if (!new) {
+ errcode = -pte_nomem;
+ break;
+ }
+
+ new->next = next;
+ next = new;
+ }
+
+ /* Add a section covering the remaining bytes at the back. */
+ if (end < lend) {
+ new = pt_mk_section_list(lsec, masid, end,
+ loff + (end - lbegin),
+ lend - end, current->isid);
+ if (!new) {
+ errcode = -pte_nomem;
+ break;
+ }
+
+ new->next = next;
+ next = new;
+ }
+ }
+
+ if (errcode < 0) {
+ pt_section_list_free_tail(next);
+
+ /* Re-add removed sections to the tail of the section list. */
+ for (; *list; list = &((*list)->next))
+ ;
+
+ *list = removed;
+ return errcode;
+ }
+
+ pt_section_list_free_tail(removed);
+
+ *list = next;
+ return 0;
+}
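+
+/* An illustrative example for pt_image_add() (comment only): if the image
+ * already maps a section at [0x1000, 0x3000) and a new section is added
+ * at [0x1800, 0x2000), the loop above removes the old entry and re-adds
+ * its remainders: [0x1000, 0x1800) at the old file offset and
+ * [0x2000, 0x3000) at the old offset + 0x1000, keeping the old isid for
+ * both.
+ */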
+
+int pt_image_remove(struct pt_image *image, struct pt_section *section,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_section_list **list;
+
+ if (!image || !section)
+ return -pte_internal;
+
+ for (list = &image->sections; *list; list = &((*list)->next)) {
+ struct pt_mapped_section *msec;
+ const struct pt_section *sec;
+ const struct pt_asid *masid;
+ struct pt_section_list *trash;
+ uint64_t begin;
+ int errcode;
+
+ trash = *list;
+ msec = &trash->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, asid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!errcode)
+ continue;
+
+ begin = pt_msec_begin(msec);
+ sec = pt_msec_section(msec);
+ if (sec == section && begin == vaddr) {
+ *list = trash->next;
+ pt_section_list_free(trash);
+
+ return 0;
+ }
+ }
+
+ return -pte_bad_image;
+}
+
+int pt_image_add_file(struct pt_image *image, const char *filename,
+ uint64_t offset, uint64_t size,
+ const struct pt_asid *uasid, uint64_t vaddr)
+{
+ struct pt_section *section;
+ struct pt_asid asid;
+ int errcode;
+
+ if (!image || !filename)
+ return -pte_invalid;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ section = pt_mk_section(filename, offset, size);
+ if (!section)
+ return -pte_invalid;
+
+ errcode = pt_image_add(image, section, &asid, vaddr, 0);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+
+ /* The image list got its own reference; let's drop ours. */
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
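+
+/* A minimal usage sketch for pt_image_add_file() (illustrative; the file
+ * name and addresses are made up):
+ *
+ *    struct pt_image *image = pt_image_alloc("sample");
+ *
+ *    if (image && !pt_image_add_file(image, "/bin/ls", 0ull, 0x1000ull,
+ *                                    NULL, 0x400000ull)) {
+ *        ...   // decode against the image, then clean up
+ *    }
+ *
+ *    pt_image_free(image);
+ */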
+
+int pt_image_copy(struct pt_image *image, const struct pt_image *src)
+{
+ struct pt_section_list *list;
+ int ignored;
+
+ if (!image || !src)
+ return -pte_invalid;
+
+ /* There is nothing to do if we copy an image to itself.
+ *
+ * Besides, pt_image_add() may move sections around, which would
+ * interfere with our section iteration.
+ */
+ if (image == src)
+ return 0;
+
+ ignored = 0;
+ for (list = src->sections; list; list = list->next) {
+ int errcode;
+
+ errcode = pt_image_add(image, list->section.section,
+ &list->section.asid,
+ list->section.vaddr,
+ list->isid);
+ if (errcode < 0)
+ ignored += 1;
+ }
+
+ return ignored;
+}
+
+int pt_image_remove_by_filename(struct pt_image *image, const char *filename,
+ const struct pt_asid *uasid)
+{
+ struct pt_section_list **list;
+ struct pt_asid asid;
+ int errcode, removed;
+
+ if (!image || !filename)
+ return -pte_invalid;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ removed = 0;
+ for (list = &image->sections; *list;) {
+ struct pt_mapped_section *msec;
+ const struct pt_section *sec;
+ const struct pt_asid *masid;
+ struct pt_section_list *trash;
+ const char *tname;
+
+ trash = *list;
+ msec = &trash->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, &asid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!errcode) {
+ list = &trash->next;
+ continue;
+ }
+
+ sec = pt_msec_section(msec);
+ tname = pt_section_filename(sec);
+
+ if (tname && (strcmp(tname, filename) == 0)) {
+ *list = trash->next;
+ pt_section_list_free(trash);
+
+ removed += 1;
+ } else
+ list = &trash->next;
+ }
+
+ return removed;
+}
+
+int pt_image_remove_by_asid(struct pt_image *image,
+ const struct pt_asid *uasid)
+{
+ struct pt_section_list **list;
+ struct pt_asid asid;
+ int errcode, removed;
+
+ if (!image)
+ return -pte_invalid;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ removed = 0;
+ for (list = &image->sections; *list;) {
+ struct pt_mapped_section *msec;
+ const struct pt_asid *masid;
+ struct pt_section_list *trash;
+
+ trash = *list;
+ msec = &trash->section;
+ masid = pt_msec_asid(msec);
+
+ errcode = pt_asid_match(masid, &asid);
+ if (errcode < 0)
+ return errcode;
+
+ if (!errcode) {
+ list = &trash->next;
+ continue;
+ }
+
+ *list = trash->next;
+ pt_section_list_free(trash);
+
+ removed += 1;
+ }
+
+ return removed;
+}
+
+int pt_image_set_callback(struct pt_image *image,
+ read_memory_callback_t *callback, void *context)
+{
+ if (!image)
+ return -pte_invalid;
+
+ image->readmem.callback = callback;
+ image->readmem.context = context;
+
+ return 0;
+}
+
+static int pt_image_read_callback(struct pt_image *image, int *isid,
+ uint8_t *buffer, uint16_t size,
+ const struct pt_asid *asid, uint64_t addr)
+{
+ read_memory_callback_t *callback;
+
+ if (!image || !isid)
+ return -pte_internal;
+
+ callback = image->readmem.callback;
+ if (!callback)
+ return -pte_nomap;
+
+ *isid = 0;
+
+ return callback(buffer, size, asid, addr, image->readmem.context);
+}
+
+/* Check whether a mapped section contains an address.
+ *
+ * Returns zero if @msec contains @vaddr in @asid.
+ * Returns -pte_nomap if @msec does not contain @vaddr.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_image_check_msec(const struct pt_mapped_section *msec,
+ const struct pt_asid *asid,
+ uint64_t vaddr)
+{
+ const struct pt_asid *masid;
+ uint64_t begin, end;
+ int errcode;
+
+ if (!msec)
+ return -pte_internal;
+
+ begin = pt_msec_begin(msec);
+ end = pt_msec_end(msec);
+ if (vaddr < begin || end <= vaddr)
+ return -pte_nomap;
+
+ masid = pt_msec_asid(msec);
+ errcode = pt_asid_match(masid, asid);
+ if (errcode <= 0) {
+ if (!errcode)
+ errcode = -pte_nomap;
+
+ return errcode;
+ }
+
+ return 0;
+}
+
+/* Find the section containing a given address in a given address space.
+ *
+ * On success, the found section is moved to the front of the section list.
+ * If caching is enabled, maps the section.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_image_fetch_section(struct pt_image *image,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_section_list **start, **list;
+
+ if (!image)
+ return -pte_internal;
+
+ start = &image->sections;
+ for (list = start; *list;) {
+ struct pt_mapped_section *msec;
+ struct pt_section_list *elem;
+ int errcode;
+
+ elem = *list;
+ msec = &elem->section;
+
+ errcode = pt_image_check_msec(msec, asid, vaddr);
+ if (errcode < 0) {
+ if (errcode != -pte_nomap)
+ return errcode;
+
+ list = &elem->next;
+ continue;
+ }
+
+ /* Move the section to the front if it isn't already. */
+ if (list != start) {
+ *list = elem->next;
+ elem->next = *start;
+ *start = elem;
+ }
+
+ return 0;
+ }
+
+ return -pte_nomap;
+}
+
+int pt_image_read(struct pt_image *image, int *isid, uint8_t *buffer,
+ uint16_t size, const struct pt_asid *asid, uint64_t addr)
+{
+ struct pt_mapped_section *msec;
+ struct pt_section_list *slist;
+ struct pt_section *section;
+ int errcode, status;
+
+ if (!image || !isid)
+ return -pte_internal;
+
+ errcode = pt_image_fetch_section(image, asid, addr);
+ if (errcode < 0) {
+ if (errcode != -pte_nomap)
+ return errcode;
+
+ return pt_image_read_callback(image, isid, buffer, size, asid,
+ addr);
+ }
+
+ slist = image->sections;
+ if (!slist)
+ return -pte_internal;
+
+ *isid = slist->isid;
+ msec = &slist->section;
+
+ section = pt_msec_section(msec);
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_msec_read(msec, buffer, size, addr);
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (status < 0) {
+ if (status != -pte_nomap)
+ return status;
+
+ return pt_image_read_callback(image, isid, buffer, size, asid,
+ addr);
+ }
+
+ return status;
+}
+
+int pt_image_add_cached(struct pt_image *image,
+ struct pt_image_section_cache *iscache, int isid,
+ const struct pt_asid *uasid)
+{
+ struct pt_section *section;
+ struct pt_asid asid;
+ uint64_t vaddr;
+ int errcode, status;
+
+ if (!image || !iscache)
+ return -pte_invalid;
+
+ errcode = pt_iscache_lookup(iscache, &section, &vaddr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_asid_from_user(&asid, uasid);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_image_add(image, section, &asid, vaddr, isid);
+
+ /* We grab a reference when we add the section. Drop the one we
+ * obtained from cache lookup.
+ */
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+int pt_image_find(struct pt_image *image, struct pt_mapped_section *usec,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_mapped_section *msec;
+ struct pt_section_list *slist;
+ struct pt_section *section;
+ int errcode;
+
+ if (!image || !usec)
+ return -pte_internal;
+
+ errcode = pt_image_fetch_section(image, asid, vaddr);
+ if (errcode < 0)
+ return errcode;
+
+ slist = image->sections;
+ if (!slist)
+ return -pte_internal;
+
+ msec = &slist->section;
+ section = pt_msec_section(msec);
+
+ errcode = pt_section_get(section);
+ if (errcode < 0)
+ return errcode;
+
+ *usec = *msec;
+
+ return slist->isid;
+}
+
+int pt_image_validate(const struct pt_image *image,
+ const struct pt_mapped_section *usec, uint64_t vaddr,
+ int isid)
+{
+ const struct pt_section_list *slist;
+ uint64_t begin, end;
+ int status;
+
+ if (!image || !usec)
+ return -pte_internal;
+
+ /* Check that @vaddr lies within @usec. */
+ begin = pt_msec_begin(usec);
+ end = pt_msec_end(usec);
+ if (vaddr < begin || end <= vaddr)
+ return -pte_nomap;
+
+ /* We assume that @usec is a copy of the top of our stack and accept
+ * sporadic validation failures if it isn't, e.g. because it has moved
+ * down.
+ *
+ * A failed validation requires decoders to re-fetch the section so it
+ * only results in a (relatively small) performance loss.
+ */
+ slist = image->sections;
+ if (!slist)
+ return -pte_nomap;
+
+ if (slist->isid != isid)
+ return -pte_nomap;
+
+ status = memcmp(&slist->section, usec, sizeof(*usec));
+ if (status)
+ return -pte_nomap;
+
+ return 0;
+}
diff --git a/contrib/processor-trace/libipt/src/pt_image_section_cache.c b/contrib/processor-trace/libipt/src/pt_image_section_cache.c
new file mode 100644
index 0000000000000..f380890ee0b4a
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_image_section_cache.c
@@ -0,0 +1,1091 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image_section_cache.h"
+#include "pt_section.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+
+
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ return strcpy(dup, str);
+}
+
+int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
+{
+ if (!iscache)
+ return -pte_internal;
+
+ memset(iscache, 0, sizeof(*iscache));
+ iscache->limit = UINT64_MAX;
+ if (name) {
+ iscache->name = dupstr(name);
+ if (!iscache->name)
+ return -pte_nomem;
+ }
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_init(&iscache->lock, mtx_plain);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+void pt_iscache_fini(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return;
+
+ (void) pt_iscache_clear(iscache);
+ free(iscache->name);
+
+#if defined(FEATURE_THREADS)
+
+ mtx_destroy(&iscache->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+}
+
+static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&iscache->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&iscache->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
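+/* Map a cache index to an image section identifier.
+ *
+ * Isids are one-based; an isid of zero is reserved, e.g. for reads that
+ * are satisfied by an image's memory callback rather than by a cached
+ * section.
+ */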
+static inline int isid_from_index(uint16_t index)
+{
+ return index + 1;
+}
+
+static int pt_iscache_expand(struct pt_image_section_cache *iscache)
+{
+ struct pt_iscache_entry *entries;
+ uint16_t capacity, target;
+
+ if (!iscache)
+ return -pte_internal;
+
+ capacity = iscache->capacity;
+ target = capacity + 8;
+
+ /* Check for overflows. */
+ if (target < capacity)
+ return -pte_nomem;
+
+ entries = realloc(iscache->entries, target * sizeof(*entries));
+ if (!entries)
+ return -pte_nomem;
+
+ iscache->capacity = target;
+ iscache->entries = entries;
+ return 0;
+}
+
+static int pt_iscache_find_locked(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t laddr)
+{
+ uint16_t idx, end;
+
+ if (!iscache || !filename)
+ return -pte_internal;
+
+ end = iscache->size;
+ for (idx = 0; idx < end; ++idx) {
+ const struct pt_iscache_entry *entry;
+ const struct pt_section *section;
+ const char *sec_filename;
+ uint64_t sec_offset, sec_size;
+
+ entry = &iscache->entries[idx];
+
+ /* We do not zero-initialize the array - a NULL check is
+ * pointless.
+ */
+ section = entry->section;
+ sec_filename = pt_section_filename(section);
+ sec_offset = pt_section_offset(section);
+ sec_size = pt_section_size(section);
+
+ if (entry->laddr != laddr)
+ continue;
+
+ if (sec_offset != offset)
+ continue;
+
+ if (sec_size != size)
+ continue;
+
+ /* We should not have a section without a filename. */
+ if (!sec_filename)
+ return -pte_internal;
+
+ if (strcmp(sec_filename, filename) != 0)
+ continue;
+
+ return isid_from_index(idx);
+ }
+
+ return 0;
+}
+
+static int pt_iscache_lru_free(struct pt_iscache_lru_entry *lru)
+{
+ while (lru) {
+ struct pt_iscache_lru_entry *trash;
+ int errcode;
+
+ trash = lru;
+ lru = lru->next;
+
+ errcode = pt_section_unmap(trash->section);
+ if (errcode < 0)
+ return errcode;
+
+ free(trash);
+ }
+
+ return 0;
+}
+
+static int pt_iscache_lru_prune(struct pt_image_section_cache *iscache,
+ struct pt_iscache_lru_entry **tail)
+{
+ struct pt_iscache_lru_entry *lru, **pnext;
+ uint64_t limit, used;
+
+ if (!iscache || !tail)
+ return -pte_internal;
+
+ limit = iscache->limit;
+ used = 0ull;
+
+ pnext = &iscache->lru;
+ for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
+
+ used += lru->size;
+ if (used <= limit)
+ continue;
+
+ /* The cache got too big; prune it starting from @lru. */
+ iscache->used = used - lru->size;
+ *pnext = NULL;
+ *tail = lru;
+
+ return 0;
+ }
+
+ /* We shouldn't prune the cache unnecessarily. */
+ return -pte_internal;
+}
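+
+/* An illustrative example for pt_iscache_lru_prune() (comment only): with
+ * @limit = 4 KiB and an lru list of mapped sizes 3 KiB and 2 KiB, the
+ * running total exceeds the limit at the second entry, so @iscache->used
+ * drops to 3 KiB and the second entry (and everything behind it) is
+ * handed to the caller for unmapping via pt_iscache_lru_free().
+ */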
+
+/* Add @section to the front of @iscache->lru.
+ *
+ * Returns a positive integer if we need to prune the cache.
+ * Returns zero if we don't need to prune the cache.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_new(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *lru;
+ uint64_t memsize, used, total, limit;
+ int errcode;
+
+ if (!iscache)
+ return -pte_internal;
+
+ errcode = pt_section_memsize(section, &memsize);
+ if (errcode < 0)
+ return errcode;
+
+ /* Don't try to add the section if it is too big. We'd prune it again
+ * together with all other sections in our cache.
+ */
+ limit = iscache->limit;
+ if (limit < memsize)
+ return 0;
+
+ errcode = pt_section_map_share(section);
+ if (errcode < 0)
+ return errcode;
+
+ lru = malloc(sizeof(*lru));
+ if (!lru) {
+ (void) pt_section_unmap(section);
+ return -pte_nomem;
+ }
+
+ lru->section = section;
+ lru->size = memsize;
+
+ lru->next = iscache->lru;
+ iscache->lru = lru;
+
+ used = iscache->used;
+ total = used + memsize;
+ if (total < used || total < memsize)
+ return -pte_overflow;
+
+ iscache->used = total;
+
+ return (limit < total) ? 1 : 0;
+}
+
+/* Add or move @section to the front of @iscache->lru.
+ *
+ * Returns a positive integer if we need to prune the cache.
+ * Returns zero if we don't need to prune the cache.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_add(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *lru, **pnext;
+
+ if (!iscache)
+ return -pte_internal;
+
+ pnext = &iscache->lru;
+ for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
+
+ if (lru->section != section)
+ continue;
+
+ /* We found it in the cache. Move it to the front. */
+ *pnext = lru->next;
+ lru->next = iscache->lru;
+ iscache->lru = lru;
+
+ return 0;
+ }
+
+ /* We didn't find it in the cache. Add it. */
+ return pt_iscache_lru_new(iscache, section);
+}
+
+
+/* Remove @section from @iscache->lru.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_remove(struct pt_image_section_cache *iscache,
+ const struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *lru, **pnext;
+
+ if (!iscache)
+ return -pte_internal;
+
+ pnext = &iscache->lru;
+ for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
+
+ if (lru->section != section)
+ continue;
+
+ /* We found it in the cache. Remove it. */
+ *pnext = lru->next;
+ lru->next = NULL;
+ break;
+ }
+
+ return pt_iscache_lru_free(lru);
+}
+
+
+/* Add or move @section to the front of @iscache->lru and update its size.
+ *
+ * Returns a positive integer if we need to prune the cache.
+ * Returns zero if we don't need to prune the cache.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t memsize)
+{
+ struct pt_iscache_lru_entry *lru;
+ uint64_t oldsize, used;
+ int status;
+
+ if (!iscache)
+ return -pte_internal;
+
+ status = pt_iscache_lru_add(iscache, section);
+ if (status < 0)
+ return status;
+
+ lru = iscache->lru;
+ if (!lru) {
+ if (status)
+ return -pte_internal;
+ return 0;
+ }
+
+ /* If @section is cached, it must be first.
+ *
+ * We may choose not to cache it, though, e.g. if it is too big.
+ */
+ if (lru->section != section) {
+ if (iscache->limit < memsize)
+ return 0;
+
+ return -pte_internal;
+ }
+
+ oldsize = lru->size;
+ lru->size = memsize;
+
+ /* If we need to prune anyway, we're done. */
+ if (status)
+ return status;
+
+ used = iscache->used;
+ used -= oldsize;
+ used += memsize;
+
+ iscache->used = used;
+
+ return (iscache->limit < used) ? 1 : 0;
+}
+
+/* Clear @iscache->lru.
+ *
+ * Unlike other iscache_lru functions, the caller does not lock @iscache.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_iscache_lru_clear(struct pt_image_section_cache *iscache)
+{
+ struct pt_iscache_lru_entry *lru;
+ int errcode;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ lru = iscache->lru;
+ iscache->lru = NULL;
+ iscache->used = 0ull;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_iscache_lru_free(lru);
+}
+
+/* Search @iscache for a section matching @filename, @offset, and @size,
+ * either loaded at @laddr (an exact match) or at some other address (a
+ * partial match), and return the corresponding index, or @iscache->size if
+ * no match is found.
+ *
+ * The caller must lock @iscache.
+ *
+ * Returns a non-negative index on success, a negative pt_error_code otherwise.
+ */
+static int
+pt_iscache_find_section_locked(const struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset,
+ uint64_t size, uint64_t laddr)
+{
+ const struct pt_section *section;
+ uint16_t idx, end;
+ int match;
+
+ if (!iscache || !filename)
+ return -pte_internal;
+
+ section = NULL;
+ match = end = iscache->size;
+ for (idx = 0; idx < end; ++idx) {
+ const struct pt_iscache_entry *entry;
+ const struct pt_section *sec;
+
+ entry = &iscache->entries[idx];
+
+ /* We do not zero-initialize the array - a NULL check is
+ * pointless.
+ */
+ sec = entry->section;
+
+ /* Avoid redundant match checks. */
+ if (sec != section) {
+ const char *sec_filename;
+
+ /* We don't have duplicates. Skip the check. */
+ if (section)
+ continue;
+
+ if (offset != pt_section_offset(sec))
+ continue;
+
+ if (size != pt_section_size(sec))
+ continue;
+
+ sec_filename = pt_section_filename(sec);
+ if (!sec_filename)
+ return -pte_internal;
+
+ if (strcmp(filename, sec_filename) != 0)
+ continue;
+
+ /* Use the cached section instead. */
+ section = sec;
+ match = idx;
+ }
+
+ /* If we didn't continue, @section == @sec and we have a match.
+ *
+ * If we also find a matching load address, we're done.
+ */
+ if (laddr == entry->laddr)
+ return idx;
+ }
+
+ return match;
+}
+
+int pt_iscache_add(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t laddr)
+{
+ const char *filename;
+ uint64_t offset, size;
+ uint16_t idx;
+ int errcode;
+
+ if (!iscache || !section)
+ return -pte_internal;
+
+ /* We must have a filename for @section. */
+ filename = pt_section_filename(section);
+ if (!filename)
+ return -pte_internal;
+
+ offset = pt_section_offset(section);
+ size = pt_section_size(section);
+
+ /* Adding a section is slightly complicated by a potential deadlock
+ * scenario:
+ *
+ * - in order to add a section, we need to attach to it, which
+ * requires taking the section's attach lock.
+ *
+ * - if we are already attached to it, we may receive on-map
+ * notifications, which will be sent while holding the attach lock
+ * and require taking the iscache lock.
+ *
+ * Hence we can't attach to a section while holding the iscache lock.
+ *
+ * We therefore attach to @section first and then lock @iscache.
+ *
+ * This opens a small window where an existing @section may be removed
+ * from @iscache and replaced by a new matching section. We would want
+ * to share that new section rather than adding a duplicate @section.
+ *
+ * After locking @iscache, we therefore check for existing matching
+ * sections and, if one is found, update @section. This involves
+ * detaching from @section and attaching to the existing section.
+ *
+ * And for this, we will have to temporarily unlock @iscache again.
+ */
+ errcode = pt_section_get(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_attach(section, iscache);
+ if (errcode < 0)
+ goto out_put;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ goto out_detach;
+
+ /* We may need to repeat this step.
+ *
+ * Typically we don't and this takes only a single iteration. One
+ * scenario where we do repeat this is when adding a section with an
+ * out-of-bounds size.
+ *
+ * We will not find a matching section in pt_iscache_add_file() so we
+ * create a new section. This will have its size reduced to match the
+ * actual file size.
+ *
+ * For this reduced size, we may now find an existing section, and we
+ * will take another trip in the below loop.
+ */
+ for (;;) {
+ const struct pt_iscache_entry *entry;
+ struct pt_section *sec;
+ int match;
+
+ /* Find an existing section matching @section that we'd share
+ * rather than adding @section.
+ */
+ match = pt_iscache_find_section_locked(iscache, filename,
+ offset, size, laddr);
+ if (match < 0) {
+ errcode = match;
+ goto out_unlock_detach;
+ }
+
+ /* We're done if we have not found a matching section. */
+ if (iscache->size <= match)
+ break;
+
+ entry = &iscache->entries[match];
+
+ /* We're also done if we found the same section again.
+ *
+ * We further check for a perfect match. In that case, we don't
+ * need to insert anything at all.
+ */
+ sec = entry->section;
+ if (sec == section) {
+ if (entry->laddr == laddr) {
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ goto out_detach;
+
+ errcode = pt_section_detach(section, iscache);
+ if (errcode < 0)
+ goto out_lru;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return isid_from_index((uint16_t) match);
+ }
+
+ break;
+ }
+
+ /* We update @section to share the existing @sec.
+ *
+ * This requires detaching from @section, which, in turn,
+ * requires temporarily unlocking @iscache.
+ *
+ * We further need to remove @section from @iscache->lru.
+ */
+ errcode = pt_section_get(sec);
+ if (errcode < 0)
+ goto out_unlock_detach;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(sec);
+ goto out_detach;
+ }
+
+ errcode = pt_section_detach(section, iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(sec);
+ goto out_lru;
+ }
+
+ errcode = pt_section_attach(sec, iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(sec);
+ goto out_lru;
+ }
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ /* Complete the swap for cleanup. */
+ section = sec;
+ goto out_detach;
+ }
+
+ /* We may have received on-map notifications for @section and we
+ * may have added @section to @iscache->lru.
+ *
+ * Since we're still holding a reference to it, no harm has been
+ * done. But we need to remove it before we drop our reference.
+ */
+ errcode = pt_iscache_lru_remove(iscache, section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ /* Complete the swap for cleanup. */
+ section = sec;
+ goto out_unlock_detach;
+ }
+
+ /* Drop the reference to @section. */
+ errcode = pt_section_put(section);
+ if (errcode < 0) {
+ /* Complete the swap for cleanup. */
+ section = sec;
+ goto out_unlock_detach;
+ }
+
+ /* Swap sections.
+ *
+ * We will try again in the next iteration.
+ */
+ section = sec;
+ }
+
+ /* Expand the cache, if necessary. */
+ if (iscache->capacity <= iscache->size) {
+ /* We must never exceed the capacity. */
+ if (iscache->capacity < iscache->size) {
+ errcode = -pte_internal;
+ goto out_unlock_detach;
+ }
+
+ errcode = pt_iscache_expand(iscache);
+ if (errcode < 0)
+ goto out_unlock_detach;
+
+ /* Make sure it is big enough, now. */
+ if (iscache->capacity <= iscache->size) {
+ errcode = -pte_internal;
+ goto out_unlock_detach;
+ }
+ }
+
+ /* Insert a new entry for @section at @laddr.
+ *
+ * This hands both attach and reference over to @iscache. We will
+ * detach and drop the reference again when the entry is removed.
+ */
+ idx = iscache->size++;
+
+ iscache->entries[idx].section = section;
+ iscache->entries[idx].laddr = laddr;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return isid_from_index(idx);
+
+ out_unlock_detach:
+ (void) pt_iscache_unlock(iscache);
+
+ out_detach:
+ (void) pt_section_detach(section, iscache);
+
+ out_lru:
+ (void) pt_iscache_lru_clear(iscache);
+
+ out_put:
+ (void) pt_section_put(section);
+
+ return errcode;
+}
+
+int pt_iscache_find(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset, uint64_t size,
+ uint64_t laddr)
+{
+ int errcode, isid;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr);
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return isid;
+}
+
+int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr, int isid)
+{
+ uint16_t index;
+ int errcode, status;
+
+ if (!iscache || !section || !laddr)
+ return -pte_internal;
+
+ if (isid <= 0)
+ return -pte_bad_image;
+
+ isid -= 1;
+ if (isid > UINT16_MAX)
+ return -pte_internal;
+
+ index = (uint16_t) isid;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ if (iscache->size <= index)
+ status = -pte_bad_image;
+ else {
+ const struct pt_iscache_entry *entry;
+
+ entry = &iscache->entries[index];
+ *section = entry->section;
+ *laddr = entry->laddr;
+
+ status = pt_section_get(*section);
+ }
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+int pt_iscache_clear(struct pt_image_section_cache *iscache)
+{
+ struct pt_iscache_lru_entry *lru;
+ struct pt_iscache_entry *entries;
+ uint16_t idx, end;
+ int errcode;
+
+ if (!iscache)
+ return -pte_internal;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ entries = iscache->entries;
+ end = iscache->size;
+ lru = iscache->lru;
+
+ iscache->entries = NULL;
+ iscache->capacity = 0;
+ iscache->size = 0;
+ iscache->lru = NULL;
+ iscache->used = 0ull;
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_iscache_lru_free(lru);
+ if (errcode < 0)
+ return errcode;
+
+ for (idx = 0; idx < end; ++idx) {
+ struct pt_section *section;
+
+ section = entries[idx].section;
+
+ /* We do not zero-initialize the array - a NULL check is
+ * pointless.
+ */
+ errcode = pt_section_detach(section, iscache);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ free(entries);
+ return 0;
+}
+
+struct pt_image_section_cache *pt_iscache_alloc(const char *name)
+{
+ struct pt_image_section_cache *iscache;
+
+ iscache = malloc(sizeof(*iscache));
+ if (iscache)
+ pt_iscache_init(iscache, name);
+
+ return iscache;
+}
+
+void pt_iscache_free(struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return;
+
+ pt_iscache_fini(iscache);
+ free(iscache);
+}
+
+int pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit)
+{
+ struct pt_iscache_lru_entry *tail;
+ int errcode, status;
+
+ if (!iscache)
+ return -pte_invalid;
+
+ status = 0;
+ tail = NULL;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ iscache->limit = limit;
+ if (limit < iscache->used)
+ status = pt_iscache_lru_prune(iscache, &tail);
+
+ errcode = pt_iscache_unlock(iscache);
+
+ if (errcode < 0 || status < 0)
+ return (status < 0) ? status : errcode;
+
+ return pt_iscache_lru_free(tail);
+}
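+
+/* A usage sketch for pt_iscache_set_limit() (illustrative): cap the
+ * memory used for cached section mappings at 64 MiB; a limit of zero
+ * effectively disables caching since no section fits.
+ *
+ *    (void) pt_iscache_set_limit(iscache, 64ull << 20);
+ */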
+
+const char *pt_iscache_name(const struct pt_image_section_cache *iscache)
+{
+ if (!iscache)
+ return NULL;
+
+ return iscache->name;
+}
+
+int pt_iscache_add_file(struct pt_image_section_cache *iscache,
+ const char *filename, uint64_t offset, uint64_t size,
+ uint64_t vaddr)
+{
+ struct pt_section *section;
+ int errcode, match, isid;
+
+ if (!iscache || !filename)
+ return -pte_invalid;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ match = pt_iscache_find_section_locked(iscache, filename, offset,
+ size, vaddr);
+ if (match < 0) {
+ (void) pt_iscache_unlock(iscache);
+ return match;
+ }
+
+ /* If we found a perfect match, we will share the existing entry.
+ *
+ * If we found a section, we need to grab a reference before we unlock.
+ *
+ * If we didn't find a matching section, we create a new section, which
+ * implicitly gives us a reference to it.
+ */
+ if (match < iscache->size) {
+ const struct pt_iscache_entry *entry;
+
+ entry = &iscache->entries[match];
+ if (entry->laddr == vaddr) {
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ return isid_from_index((uint16_t) match);
+ }
+
+ section = entry->section;
+
+ errcode = pt_section_get(section);
+ if (errcode < 0) {
+ (void) pt_iscache_unlock(iscache);
+ return errcode;
+ }
+
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+ } else {
+ errcode = pt_iscache_unlock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ section = pt_mk_section(filename, offset, size);
+ if (!section)
+ return -pte_invalid;
+ }
+
+ /* We unlocked @iscache and hold a reference to @section. */
+ isid = pt_iscache_add(iscache, section, vaddr);
+
+ /* We grab a reference when we add the section. Drop the one we
+ * obtained before.
+ */
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return isid;
+}
+
+
+int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
+ uint64_t size, int isid, uint64_t vaddr)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, status;
+
+ if (!iscache || !buffer || !size)
+ return -pte_invalid;
+
+ errcode = pt_iscache_lookup(iscache, &section, &laddr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (vaddr < laddr) {
+ (void) pt_section_put(section);
+ return -pte_nomap;
+ }
+
+ vaddr -= laddr;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+
+ /* We truncate the read if it gets too big. The user is expected to
+ * issue further reads for the remaining part.
+ */
+ if (UINT16_MAX < size)
+ size = UINT16_MAX;
+
+ status = pt_section_read(section, buffer, (uint16_t) size, vaddr);
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ return errcode;
+ }
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
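+
+/* A minimal usage sketch for pt_iscache_add_file() and pt_iscache_read()
+ * (illustrative; the file name and addresses are made up):
+ *
+ *    uint8_t buffer[16];
+ *    int isid, status;
+ *
+ *    isid = pt_iscache_add_file(iscache, "/bin/ls", 0ull, 0x1000ull,
+ *                               0x400000ull);
+ *    if (isid > 0) {
+ *        status = pt_iscache_read(iscache, buffer, sizeof(buffer),
+ *                                 isid, 0x400010ull);
+ *        ...   // on success, status is the number of bytes read
+ *    }
+ */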
+
+int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ struct pt_iscache_lru_entry *tail;
+ int errcode, status;
+
+ tail = NULL;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_iscache_lru_add(iscache, section);
+ if (status > 0)
+ status = pt_iscache_lru_prune(iscache, &tail);
+
+ errcode = pt_iscache_unlock(iscache);
+
+ if (errcode < 0 || status < 0)
+ return (status < 0) ? status : errcode;
+
+ return pt_iscache_lru_free(tail);
+}
+
+int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t memsize)
+{
+ struct pt_iscache_lru_entry *tail;
+ int errcode, status;
+
+ tail = NULL;
+
+ errcode = pt_iscache_lock(iscache);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_iscache_lru_resize(iscache, section, memsize);
+ if (status > 0)
+ status = pt_iscache_lru_prune(iscache, &tail);
+
+ errcode = pt_iscache_unlock(iscache);
+
+ if (errcode < 0 || status < 0)
+ return (status < 0) ? status : errcode;
+
+ return pt_iscache_lru_free(tail);
+}
diff --git a/contrib/processor-trace/libipt/src/pt_insn.c b/contrib/processor-trace/libipt/src/pt_insn.c
new file mode 100644
index 0000000000000..0a41c4bf391ef
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_insn.c
@@ -0,0 +1,372 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_insn.h"
+#include "pt_ild.h"
+#include "pt_image.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+
+int pt_insn_changes_cpl(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) insn;
+
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return 0;
+
+ case PTI_INST_INT:
+ case PTI_INST_INT3:
+ case PTI_INST_INT1:
+ case PTI_INST_INTO:
+ case PTI_INST_IRET:
+ case PTI_INST_SYSCALL:
+ case PTI_INST_SYSENTER:
+ case PTI_INST_SYSEXIT:
+ case PTI_INST_SYSRET:
+ return 1;
+ }
+}
+
+int pt_insn_changes_cr3(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) insn;
+
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return 0;
+
+ case PTI_INST_MOV_CR3:
+ return 1;
+ }
+}
+
+int pt_insn_is_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) iext;
+
+ if (!insn)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_call:
+ case ptic_return:
+ case ptic_jump:
+ case ptic_cond_jump:
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ return 1;
+ }
+}
+
+int pt_insn_is_far_branch(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) iext;
+
+ if (!insn)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ return 1;
+ }
+}
+
+int pt_insn_binds_to_pip(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return pt_insn_is_far_branch(insn, iext);
+
+ case PTI_INST_MOV_CR3:
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ return 1;
+ }
+}
+
+int pt_insn_binds_to_vmcs(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!iext)
+ return 0;
+
+ switch (iext->iclass) {
+ default:
+ return pt_insn_is_far_branch(insn, iext);
+
+ case PTI_INST_VMPTRLD:
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ return 1;
+ }
+}
+
+int pt_insn_is_ptwrite(const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ (void) iext;
+
+ if (!insn)
+ return 0;
+
+ switch (insn->iclass) {
+ default:
+ return 0;
+
+ case ptic_ptwrite:
+ return 1;
+ }
+}
+
+int pt_insn_next_ip(uint64_t *pip, const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ uint64_t ip;
+
+ if (!insn || !iext)
+ return -pte_internal;
+
+ ip = insn->ip + insn->size;
+
+ switch (insn->iclass) {
+ case ptic_ptwrite:
+ case ptic_other:
+ break;
+
+ case ptic_call:
+ case ptic_jump:
+ if (iext->variant.branch.is_direct) {
+ ip += iext->variant.branch.displacement;
+ break;
+ }
+
+ fallthrough;
+ default:
+ return -pte_bad_query;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+
+ if (pip)
+ *pip = ip;
+
+ return 0;
+}
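+
+/* An illustrative example for pt_insn_next_ip() (comment only): for a
+ * direct JMP rel8 eb 05 at ip 0x1000 (size 2, displacement 5), the next
+ * ip is 0x1000 + 2 + 5 = 0x1007. Indirect branches take the
+ * -pte_bad_query path since their target must come from the trace.
+ */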
+
+/* Retry decoding an instruction after a preceding decode error.
+ *
+ * Instruction length decode typically fails due to 'not enough
+ * bytes'.
+ *
+ * This may be caused by partial updates of text sections
+ * represented via new image sections overlapping the original
+ * text section's image section. We stop reading memory at the
+ * end of the section so we do not read the full instruction if
+ * parts of it have been overwritten by the update.
+ *
+ * Try to read the remaining bytes and decode the instruction again. If we
+ * succeed, set @insn->truncated to indicate that the instruction is truncated
+ * in @insn->isid.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ * Returns -pte_bad_insn if the instruction could not be decoded.
+ */
+static int pt_insn_decode_retry(struct pt_insn *insn, struct pt_insn_ext *iext,
+ struct pt_image *image,
+ const struct pt_asid *asid)
+{
+ int size, errcode, isid;
+ uint8_t isize, remaining;
+
+ if (!insn)
+ return -pte_internal;
+
+ isize = insn->size;
+ remaining = sizeof(insn->raw) - isize;
+
+ /* We failed for real if we already read the maximum number of bytes for
+ * an instruction.
+ */
+ if (!remaining)
+ return -pte_bad_insn;
+
+ /* Read the remaining bytes from the image. */
+ size = pt_image_read(image, &isid, &insn->raw[isize], remaining, asid,
+ insn->ip + isize);
+ if (size <= 0) {
+ /* We should have gotten an error if we were not able to read at
+ * least one byte. Check this to guarantee termination.
+ */
+ if (!size)
+ return -pte_internal;
+
+ /* Preserve the original error if there are no more bytes. */
+ if (size == -pte_nomap)
+ size = -pte_bad_insn;
+
+ return size;
+ }
+
+ /* Add the newly read bytes to the instruction's size. */
+ insn->size += (uint8_t) size;
+
+ /* Store the new size to avoid infinite recursion in case instruction
+ * decode fails after length decode, which would set @insn->size to the
+ * actual length.
+ */
+ size = insn->size;
+
+ /* Try to decode the instruction again.
+ *
+ * If we fail again, we retry recursively until we either fail to
+ * read more bytes or reach the maximum number of bytes for an
+ * instruction.
+ */
+ errcode = pt_ild_decode(insn, iext);
+ if (errcode < 0) {
+ if (errcode != -pte_bad_insn)
+ return errcode;
+
+ /* If instruction length decode already determined the size,
+ * there's no point in reading more bytes.
+ */
+ if (insn->size != (uint8_t) size)
+ return errcode;
+
+ return pt_insn_decode_retry(insn, iext, image, asid);
+ }
+
+ /* We succeeded this time, so the instruction crosses image section
+ * boundaries.
+ *
+ * This poses the question which isid to use for the instruction.
+ *
+ * To reconstruct exactly this instruction at a later time, we'd need to
+ * store all isids involved together with the number of bytes read for
+ * each isid. Since @insn already provides the exact bytes for this
+ * instruction, we assume that the isid will be used solely for source
+ * correlation. In this case, it should refer to the first byte of the
+ * instruction - as it already does.
+ */
+ insn->truncated = 1;
+
+ return errcode;
+}
+
+int pt_insn_decode(struct pt_insn *insn, struct pt_insn_ext *iext,
+ struct pt_image *image, const struct pt_asid *asid)
+{
+ int size, errcode;
+
+ if (!insn)
+ return -pte_internal;
+
+ /* Read the memory at the current IP in the current address space. */
+ size = pt_image_read(image, &insn->isid, insn->raw, sizeof(insn->raw),
+ asid, insn->ip);
+ if (size < 0)
+ return size;
+
+ /* We initialize @insn->size to the maximal possible size. It will be
+ * set to the actual size during instruction decode.
+ */
+ insn->size = (uint8_t) size;
+
+ errcode = pt_ild_decode(insn, iext);
+ if (errcode < 0) {
+ if (errcode != -pte_bad_insn)
+ return errcode;
+
+ /* If instruction length decode already determined the size,
+ * there's no point in reading more bytes.
+ */
+ if (insn->size != (uint8_t) size)
+ return errcode;
+
+ return pt_insn_decode_retry(insn, iext, image, asid);
+ }
+
+ return errcode;
+}
+
+int pt_insn_range_is_contiguous(uint64_t begin, uint64_t end,
+ enum pt_exec_mode mode, struct pt_image *image,
+ const struct pt_asid *asid, size_t steps)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+
+ memset(&insn, 0, sizeof(insn));
+
+ insn.mode = mode;
+ insn.ip = begin;
+
+ while (insn.ip != end) {
+ int errcode;
+
+ if (!steps--)
+ return 0;
+
+ errcode = pt_insn_decode(&insn, &iext, image, asid);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_insn_next_ip(&insn.ip, &insn, &iext);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 1;
+}
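+
+/* Example: handle_erratum_bdm64() in pt_insn_decoder.c uses this helper
+ * to check whether a TSX abort IP is reachable from the current IP:
+ *
+ *	status = pt_insn_range_is_contiguous(decoder->ip,
+ *					     ev->variant.tsx.ip,
+ *					     decoder->mode, decoder->image,
+ *					     &decoder->asid, 0x100);
+ *
+ * A return of 1 means @end was reached from @begin; a return of 0 means
+ * the step budget ran out first.
+ */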
diff --git a/contrib/processor-trace/libipt/src/pt_insn_decoder.c b/contrib/processor-trace/libipt/src/pt_insn_decoder.c
new file mode 100644
index 0000000000000..0cf8740ee8dcd
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_insn_decoder.c
@@ -0,0 +1,1765 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_insn_decoder.h"
+#include "pt_insn.h"
+#include "pt_config.h"
+#include "pt_asid.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+static int pt_insn_check_ip_event(struct pt_insn_decoder *,
+ const struct pt_insn *,
+ const struct pt_insn_ext *);
+
+
+static void pt_insn_reset(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ decoder->mode = ptem_unknown;
+ decoder->ip = 0ull;
+ decoder->status = 0;
+ decoder->enabled = 0;
+ decoder->process_event = 0;
+ decoder->speculative = 0;
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ pt_retstack_init(&decoder->retstack);
+ pt_asid_init(&decoder->asid);
+}
+
+static int pt_insn_status(const struct pt_insn_decoder *decoder, int flags)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = decoder->status;
+
+ /* Indicate whether tracing is disabled or enabled.
+ *
+ * This duplicates the indication in struct pt_insn and covers the case
+ * where we indicate the status after synchronizing.
+ */
+ if (!decoder->enabled)
+ flags |= pts_ip_suppressed;
+
+ /* Forward end-of-trace indications.
+ *
+ * Postpone it as long as we're still processing events, though.
+ */
+ if ((status & pts_eos) && !decoder->process_event)
+ flags |= pts_eos;
+
+ return flags;
+}
+
+/* Initialize the query decoder flags based on our flags. */
+static int pt_insn_init_qry_flags(struct pt_conf_flags *qflags,
+ const struct pt_conf_flags *flags)
+{
+ if (!qflags || !flags)
+ return -pte_internal;
+
+ memset(qflags, 0, sizeof(*qflags));
+
+ return 0;
+}
+
+int pt_insn_decoder_init(struct pt_insn_decoder *decoder,
+ const struct pt_config *uconfig)
+{
+ struct pt_config config;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_config_from_user(&config, uconfig);
+ if (errcode < 0)
+ return errcode;
+
+ /* The user supplied decoder flags. */
+ decoder->flags = config.flags;
+
+ /* Set the flags we need for the query decoder we use. */
+ errcode = pt_insn_init_qry_flags(&config.flags, &decoder->flags);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_decoder_init(&decoder->query, &config);
+ if (errcode < 0)
+ return errcode;
+
+ pt_image_init(&decoder->default_image, NULL);
+ decoder->image = &decoder->default_image;
+
+ errcode = pt_msec_cache_init(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ pt_insn_reset(decoder);
+
+ return 0;
+}
+
+void pt_insn_decoder_fini(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_msec_cache_fini(&decoder->scache);
+ pt_image_fini(&decoder->default_image);
+ pt_qry_decoder_fini(&decoder->query);
+}
+
+struct pt_insn_decoder *pt_insn_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_insn_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_insn_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_insn_free_decoder(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ pt_insn_decoder_fini(decoder);
+ free(decoder);
+}
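+
+/* Example: typical allocation and teardown (an illustrative sketch; the
+ * pt_config is assumed to have been initialized by the caller):
+ *
+ *	struct pt_insn_decoder *decoder;
+ *
+ *	decoder = pt_insn_alloc_decoder(&config);
+ *	if (!decoder)
+ *		return -pte_nomem;
+ *
+ *	(decode, see pt_insn_sync_forward() and pt_insn_next() below)
+ *
+ *	pt_insn_free_decoder(decoder);
+ */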
+
+/* Maybe synthesize a tick event.
+ *
+ * If we're not already processing events, check the current time against the
+ * last event's time. If it changed, synthesize a tick event with the new time.
+ *
+ * Returns zero if no tick event has been created.
+ * Returns a positive integer if a tick event has been created.
+ * Returns a negative error code otherwise.
+ */
+static int pt_insn_tick(struct pt_insn_decoder *decoder, uint64_t ip)
+{
+ struct pt_event *ev;
+ uint64_t tsc;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* We're not generating tick events if tracing is disabled. */
+ if (!decoder->enabled)
+ return -pte_internal;
+
+ /* Events already provide a timestamp so there is no need to synthesize
+ * an artificial tick event. There's no room, either, since this would
+ * overwrite the in-progress event.
+ *
+ * In rare cases where we need to proceed to an event location using
+ * trace this may cause us to miss a timing update if the event is not
+ * forwarded to the user.
+ *
+ * The only case I can come up with at the moment is a MODE.EXEC binding
+ * to the TIP IP of a far branch.
+ */
+ if (decoder->process_event)
+ return 0;
+
+ errcode = pt_qry_time(&decoder->query, &tsc, &lost_mtc, &lost_cyc);
+ if (errcode < 0) {
+ /* If we don't have wall-clock time, we use relative time. */
+ if (errcode != -pte_no_time)
+ return errcode;
+ }
+
+ ev = &decoder->event;
+
+ /* We're done if time has not changed since the last event. */
+ if (tsc == ev->tsc)
+ return 0;
+
+ /* Time has changed so we create a new tick event. */
+ memset(ev, 0, sizeof(*ev));
+ ev->type = ptev_tick;
+ ev->variant.tick.ip = ip;
+
+ /* Indicate if we have wall-clock time or only relative time. */
+ if (errcode != -pte_no_time)
+ ev->has_tsc = 1;
+ ev->tsc = tsc;
+ ev->lost_mtc = lost_mtc;
+ ev->lost_cyc = lost_cyc;
+
+ /* We now have an event to process. */
+ decoder->process_event = 1;
+
+ return 1;
+}
+
+/* Query an indirect branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_insn_indirect_branch(struct pt_insn_decoder *decoder,
+ uint64_t *ip)
+{
+ uint64_t evip;
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ evip = decoder->ip;
+
+ status = pt_qry_indirect_branch(&decoder->query, ip);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.insn.enable_tick_events) {
+ errcode = pt_insn_tick(decoder, evip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+/* Query a conditional branch.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_insn_cond_branch(struct pt_insn_decoder *decoder, int *taken)
+{
+ int status, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_cond_branch(&decoder->query, taken);
+ if (status < 0)
+ return status;
+
+ if (decoder->flags.variant.insn.enable_tick_events) {
+ errcode = pt_insn_tick(decoder, decoder->ip);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return status;
+}
+
+static int pt_insn_start(struct pt_insn_decoder *decoder, int status)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+
+ if (!(status & pts_ip_suppressed))
+ decoder->enabled = 1;
+
+ /* Process any initial events.
+ *
+ * Some events are processed after proceeding to the next IP in order to
+ * indicate things like tracing disable or trace stop in the preceding
+ * instruction. Those events will be processed without such an
+ * indication before decoding the current instruction.
+ *
+ * We do this already here so we can indicate user-events that precede
+ * the first instruction.
+ */
+ return pt_insn_check_ip_event(decoder, NULL, NULL);
+}
+
+int pt_insn_sync_forward(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pt_insn_reset(decoder);
+
+ status = pt_qry_sync_forward(&decoder->query, &decoder->ip);
+
+ return pt_insn_start(decoder, status);
+}
+
+int pt_insn_sync_backward(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pt_insn_reset(decoder);
+
+ status = pt_qry_sync_backward(&decoder->query, &decoder->ip);
+
+ return pt_insn_start(decoder, status);
+}
+
+int pt_insn_sync_set(struct pt_insn_decoder *decoder, uint64_t offset)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pt_insn_reset(decoder);
+
+ status = pt_qry_sync_set(&decoder->query, &decoder->ip, offset);
+
+ return pt_insn_start(decoder, status);
+}
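+
+/* Example: scanning a complete trace buffer (an illustrative sketch;
+ * per-instruction decode is elided):
+ *
+ *	for (;;) {
+ *		status = pt_insn_sync_forward(decoder);
+ *		if (status < 0)
+ *			break;
+ *
+ *		(decode with pt_insn_next() until an error, then loop
+ *		 around to re-synchronize at the next PSB)
+ *	}
+ *
+ * The loop ends with -pte_eos when there is no further PSB.
+ */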
+
+int pt_insn_get_offset(const struct pt_insn_decoder *decoder, uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_offset(&decoder->query, offset);
+}
+
+int pt_insn_get_sync_offset(const struct pt_insn_decoder *decoder,
+ uint64_t *offset)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ return pt_qry_get_sync_offset(&decoder->query, offset);
+}
+
+struct pt_image *pt_insn_get_image(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return decoder->image;
+}
+
+int pt_insn_set_image(struct pt_insn_decoder *decoder,
+ struct pt_image *image)
+{
+ if (!decoder)
+ return -pte_invalid;
+
+ if (!image)
+ image = &decoder->default_image;
+
+ decoder->image = image;
+ return 0;
+}
+
+const struct pt_config *
+pt_insn_get_config(const struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return pt_qry_get_config(&decoder->query);
+}
+
+int pt_insn_time(struct pt_insn_decoder *decoder, uint64_t *time,
+ uint32_t *lost_mtc, uint32_t *lost_cyc)
+{
+ if (!decoder || !time)
+ return -pte_invalid;
+
+ return pt_qry_time(&decoder->query, time, lost_mtc, lost_cyc);
+}
+
+int pt_insn_core_bus_ratio(struct pt_insn_decoder *decoder, uint32_t *cbr)
+{
+ if (!decoder || !cbr)
+ return -pte_invalid;
+
+ return pt_qry_core_bus_ratio(&decoder->query, cbr);
+}
+
+int pt_insn_asid(const struct pt_insn_decoder *decoder, struct pt_asid *asid,
+ size_t size)
+{
+ if (!decoder || !asid)
+ return -pte_invalid;
+
+ return pt_asid_to_user(asid, &decoder->asid, size);
+}
+
+static inline int event_pending(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ if (decoder->process_event)
+ return 1;
+
+ status = decoder->status;
+ if (!(status & pts_event_pending))
+ return 0;
+
+ status = pt_qry_event(&decoder->query, &decoder->event,
+ sizeof(decoder->event));
+ if (status < 0)
+ return status;
+
+ decoder->process_event = 1;
+ decoder->status = status;
+ return 1;
+}
+
+static int check_erratum_skd022(struct pt_insn_decoder *decoder)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ insn.mode = decoder->mode;
+ insn.ip = decoder->ip;
+
+ errcode = pt_insn_decode(&insn, &iext, decoder->image, &decoder->asid);
+ if (errcode < 0)
+ return 0;
+
+ switch (iext.iclass) {
+ default:
+ return 0;
+
+ case PTI_INST_VMLAUNCH:
+ case PTI_INST_VMRESUME:
+ return 1;
+ }
+}
+
+static inline int handle_erratum_skd022(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+ uint64_t ip;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = check_erratum_skd022(decoder);
+ if (errcode <= 0)
+ return errcode;
+
+ /* We turn the async disable into a sync disable. It will be processed
+ * after decoding the instruction.
+ */
+ ev = &decoder->event;
+
+ ip = ev->variant.async_disabled.ip;
+
+ ev->type = ptev_disabled;
+ ev->variant.disabled.ip = ip;
+
+ return 1;
+}
+
+static int pt_insn_proceed(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* Branch displacements apply to the next instruction. */
+ decoder->ip += insn->size;
+
+ /* We handle non-branches, non-taken conditional branches, and
+ * compressed returns directly in the switch and do some pre-work for
+ * calls.
+ *
+ * All kinds of branches are handled below the switch.
+ */
+ switch (insn->iclass) {
+ case ptic_ptwrite:
+ case ptic_other:
+ return 0;
+
+ case ptic_cond_jump: {
+ int status, taken;
+
+ status = pt_insn_cond_branch(decoder, &taken);
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+ if (!taken)
+ return 0;
+
+ break;
+ }
+
+ case ptic_call:
+ /* Log the call for return compression.
+ *
+		 * Unless this is a call to the next instruction, as used
+		 * for position-independent code.
+ */
+ if (iext->variant.branch.displacement ||
+ !iext->variant.branch.is_direct)
+ pt_retstack_push(&decoder->retstack, decoder->ip);
+
+ break;
+
+ case ptic_return: {
+ int taken, status;
+
+ /* Check for a compressed return. */
+ status = pt_insn_cond_branch(decoder, &taken);
+ if (status >= 0) {
+ decoder->status = status;
+
+ /* A compressed return is indicated by a taken
+ * conditional branch.
+ */
+ if (!taken)
+ return -pte_bad_retcomp;
+
+ return pt_retstack_pop(&decoder->retstack,
+ &decoder->ip);
+ }
+
+ break;
+ }
+
+ case ptic_jump:
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ break;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+
+ /* Process a direct or indirect branch.
+ *
+ * This combines calls, uncompressed returns, taken conditional jumps,
+ * and all flavors of far transfers.
+ */
+ if (iext->variant.branch.is_direct)
+ decoder->ip += iext->variant.branch.displacement;
+ else {
+ int status;
+
+ status = pt_insn_indirect_branch(decoder, &decoder->ip);
+
+ if (status < 0)
+ return status;
+
+ decoder->status = status;
+
+ /* We do need an IP to proceed. */
+ if (status & pts_ip_suppressed)
+ return -pte_noip;
+ }
+
+ return 0;
+}
+
+static int pt_insn_at_skl014(const struct pt_event *ev,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext,
+ const struct pt_config *config)
+{
+ uint64_t ip;
+ int status;
+
+ if (!ev || !insn || !iext || !config)
+ return -pte_internal;
+
+ if (!ev->ip_suppressed)
+ return 0;
+
+ switch (insn->iclass) {
+ case ptic_call:
+ case ptic_jump:
+ /* The erratum only applies to unconditional direct branches. */
+ if (!iext->variant.branch.is_direct)
+ break;
+
+ /* Check the filter against the branch target. */
+ ip = insn->ip;
+ ip += insn->size;
+ ip += iext->variant.branch.displacement;
+
+ status = pt_filter_addr_check(&config->addr_filter, ip);
+ if (status <= 0) {
+ if (status < 0)
+ return status;
+
+ return 1;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pt_insn_at_disabled_event(const struct pt_event *ev,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext,
+ const struct pt_config *config)
+{
+ if (!ev || !insn || !iext || !config)
+ return -pte_internal;
+
+ if (ev->ip_suppressed) {
+ if (pt_insn_is_far_branch(insn, iext) ||
+ pt_insn_changes_cpl(insn, iext) ||
+ pt_insn_changes_cr3(insn, iext))
+ return 1;
+
+ /* If we don't have a filter configuration we assume that no
+ * address filters were used and the erratum does not apply.
+ *
+ * We might otherwise disable tracing too early.
+ */
+ if (config->addr_filter.config.addr_cfg &&
+ config->errata.skl014 &&
+ pt_insn_at_skl014(ev, insn, iext, config))
+ return 1;
+ } else {
+ switch (insn->iclass) {
+ case ptic_ptwrite:
+ case ptic_other:
+ break;
+
+ case ptic_call:
+ case ptic_jump:
+ /* If we got an IP with the disabled event, we may
+ * ignore direct branches that go to a different IP.
+ */
+ if (iext->variant.branch.is_direct) {
+ uint64_t ip;
+
+ ip = insn->ip;
+ ip += insn->size;
+ ip += iext->variant.branch.displacement;
+
+ if (ip != ev->variant.disabled.ip)
+ break;
+ }
+
+ fallthrough;
+ case ptic_return:
+ case ptic_far_call:
+ case ptic_far_return:
+ case ptic_far_jump:
+ case ptic_cond_jump:
+ return 1;
+
+ case ptic_error:
+ return -pte_bad_insn;
+ }
+ }
+
+ return 0;
+}
+
+/* Postpone proceeding past @insn/@iext and indicate a pending event.
+ *
+ * There may be further events pending on @insn/@iext. Postpone proceeding past
+ * @insn/@iext until we have processed all events that bind to it.
+ *
+ * Returns a non-negative pt_status_flag bit-vector indicating a pending event
+ * on success, a negative pt_error_code otherwise.
+ */
+static int pt_insn_postpone(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ if (!decoder->process_insn) {
+ decoder->process_insn = 1;
+ decoder->insn = *insn;
+ decoder->iext = *iext;
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+}
+
+/* Remove any postponed instruction from @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_insn_clear_postponed(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->process_insn = 0;
+ decoder->bound_paging = 0;
+ decoder->bound_vmcs = 0;
+ decoder->bound_ptwrite = 0;
+
+ return 0;
+}
+
+/* Proceed past a postponed instruction.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_insn_proceed_postponed(struct pt_insn_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ if (!decoder->process_insn)
+ return -pte_internal;
+
+ /* There's nothing to do if tracing got disabled. */
+ if (!decoder->enabled)
+ return pt_insn_clear_postponed(decoder);
+
+ status = pt_insn_proceed(decoder, &decoder->insn, &decoder->iext);
+ if (status < 0)
+ return status;
+
+ return pt_insn_clear_postponed(decoder);
+}
+
+/* Check for events that bind to the instruction.
+ *
+ * Check whether an event is pending that binds to @insn/@iext, and, if that is
+ * the case, proceed past @insn/@iext and indicate the event by setting
+ * pts_event_pending.
+ *
+ * If that is not the case, we return zero; this is also what pt_insn_status()
+ * would return, since:
+ *
+ * - we suppress pts_eos as long as we're processing events
+ * - we do not set pts_ip_suppressed since tracing must be enabled
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_insn_check_insn_event(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = event_pending(decoder);
+ if (status <= 0)
+ return status;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_enabled:
+ case ptev_overflow:
+ case ptev_async_paging:
+ case ptev_async_vmcs:
+ case ptev_async_disabled:
+ case ptev_async_branch:
+ case ptev_exec_mode:
+ case ptev_tsx:
+ case ptev_stop:
+ case ptev_exstop:
+ case ptev_mwait:
+ case ptev_pwre:
+ case ptev_pwrx:
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ /* We're only interested in events that bind to instructions. */
+ return 0;
+
+ case ptev_disabled:
+ status = pt_insn_at_disabled_event(ev, insn, iext,
+ &decoder->query.config);
+ if (status <= 0)
+ return status;
+
+ /* We're at a synchronous disable event location.
+ *
+ * Let's determine the IP at which we expect tracing to resume.
+ */
+ status = pt_insn_next_ip(&decoder->ip, insn, iext);
+ if (status < 0) {
+ /* We don't know the IP on error. */
+ decoder->ip = 0ull;
+
+ /* For indirect calls, assume that we return to the next
+ * instruction.
+ *
+ * We only check the instruction class, not the
+ * is_direct property, since direct calls would have
+			 * been handled by pt_insn_next_ip() or would have
+ * provoked a different error.
+ */
+ if (status != -pte_bad_query)
+ return status;
+
+ switch (insn->iclass) {
+ case ptic_call:
+ case ptic_far_call:
+ decoder->ip = insn->ip + insn->size;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ break;
+
+ case ptev_paging:
+ /* We bind at most one paging event to an instruction. */
+ if (decoder->bound_paging)
+ return 0;
+
+ if (!pt_insn_binds_to_pip(insn, iext))
+ return 0;
+
+ /* We bound a paging event. Make sure we do not bind further
+ * paging events to this instruction.
+ */
+ decoder->bound_paging = 1;
+
+ return pt_insn_postpone(decoder, insn, iext);
+
+ case ptev_vmcs:
+ /* We bind at most one vmcs event to an instruction. */
+ if (decoder->bound_vmcs)
+ return 0;
+
+ if (!pt_insn_binds_to_vmcs(insn, iext))
+ return 0;
+
+ /* We bound a vmcs event. Make sure we do not bind further vmcs
+ * events to this instruction.
+ */
+ decoder->bound_vmcs = 1;
+
+ return pt_insn_postpone(decoder, insn, iext);
+
+ case ptev_ptwrite:
+ /* We bind at most one ptwrite event to an instruction. */
+ if (decoder->bound_ptwrite)
+ return 0;
+
+ if (ev->ip_suppressed) {
+ if (!pt_insn_is_ptwrite(insn, iext))
+ return 0;
+
+			/* Fill in the event IP. Our users will need it to
+			 * make sense of the PTWRITE payload.
+ */
+ ev->variant.ptwrite.ip = decoder->ip;
+ ev->ip_suppressed = 0;
+ } else {
+ /* The ptwrite event contains the IP of the ptwrite
+ * instruction (CLIP) unlike most events that contain
+ * the IP of the first instruction that did not complete
+ * (NLIP).
+ *
+ * It's easier to handle this case here, as well.
+ */
+ if (decoder->ip != ev->variant.ptwrite.ip)
+ return 0;
+ }
+
+ /* We bound a ptwrite event. Make sure we do not bind further
+ * ptwrite events to this instruction.
+ */
+ decoder->bound_ptwrite = 1;
+
+ return pt_insn_postpone(decoder, insn, iext);
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+}
+
+enum {
+ /* The maximum number of steps to take when determining whether the
+ * event location can be reached.
+ */
+ bdm64_max_steps = 0x100
+};
+
+/* Try to work around erratum BDM64.
+ *
+ * If we got a transaction abort immediately following a branch that produced
+ * trace, the trace for that branch might have been corrupted.
+ *
+ * Returns a positive integer if the erratum was handled.
+ * Returns zero if the erratum does not seem to apply.
+ * Returns a negative error code otherwise.
+ */
+static int handle_erratum_bdm64(struct pt_insn_decoder *decoder,
+ const struct pt_event *ev,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ int status;
+
+ if (!decoder || !ev || !insn || !iext)
+ return -pte_internal;
+
+ /* This only affects aborts. */
+ if (!ev->variant.tsx.aborted)
+ return 0;
+
+ /* This only affects branches. */
+ if (!pt_insn_is_branch(insn, iext))
+ return 0;
+
+ /* Let's check if we can reach the event location from here.
+ *
+ * If we can, let's assume the erratum did not hit. We might still be
+ * wrong but we're not able to tell.
+ */
+ status = pt_insn_range_is_contiguous(decoder->ip, ev->variant.tsx.ip,
+ decoder->mode, decoder->image,
+ &decoder->asid, bdm64_max_steps);
+ if (status > 0)
+ return 0;
+
+ /* We can't reach the event location. This could either mean that we
+ * stopped too early (and status is zero) or that the erratum hit.
+ *
+ * We assume the latter and pretend that the previous branch brought us
+ * to the event location, instead.
+ */
+ decoder->ip = ev->variant.tsx.ip;
+
+ return 1;
+}
+
+/* Check whether a peek TSX event should be postponed.
+ *
+ * This involves handling erratum BDM64.
+ *
+ * Returns a positive integer if the event is to be postponed.
+ * Returns zero if the event should be processed.
+ * Returns a negative error code otherwise.
+ */
+static inline int pt_insn_postpone_tsx(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext,
+ const struct pt_event *ev)
+{
+ int status;
+
+ if (!decoder || !ev)
+ return -pte_internal;
+
+ if (ev->ip_suppressed)
+ return 0;
+
+ if (insn && iext && decoder->query.config.errata.bdm64) {
+ status = handle_erratum_bdm64(decoder, ev, insn, iext);
+ if (status < 0)
+ return status;
+ }
+
+ if (decoder->ip != ev->variant.tsx.ip)
+ return 1;
+
+ return 0;
+}
+
+/* Check for events that bind to an IP.
+ *
+ * Check whether an event is pending that binds to @decoder->ip, and, if that is
+ * the case, indicate the event by setting pts_event_pending.
+ *
+ * Returns a non-negative pt_status_flag bit-vector on success, a negative error
+ * code otherwise.
+ */
+static int pt_insn_check_ip_event(struct pt_insn_decoder *decoder,
+ const struct pt_insn *insn,
+ const struct pt_insn_ext *iext)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = event_pending(decoder);
+ if (status <= 0) {
+ if (status < 0)
+ return status;
+
+ return pt_insn_status(decoder, 0);
+ }
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ case ptev_disabled:
+ break;
+
+ case ptev_enabled:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_disabled:
+ if (ev->variant.async_disabled.at != decoder->ip)
+ break;
+
+ if (decoder->query.config.errata.skd022) {
+ int errcode;
+
+ errcode = handle_erratum_skd022(decoder);
+ if (errcode != 0) {
+ if (errcode < 0)
+ return errcode;
+
+ /* If the erratum applies, we postpone the
+ * modified event to the next call to
+ * pt_insn_next().
+ */
+ break;
+ }
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_tsx:
+ status = pt_insn_postpone_tsx(decoder, insn, iext, ev);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ break;
+ }
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_branch:
+ if (ev->variant.async_branch.from != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_overflow:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_exec_mode:
+ if (!ev->ip_suppressed &&
+ ev->variant.exec_mode.ip != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_paging:
+ if (decoder->enabled)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_paging:
+ if (!ev->ip_suppressed &&
+ ev->variant.async_paging.ip != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_vmcs:
+ if (decoder->enabled)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_async_vmcs:
+ if (!ev->ip_suppressed &&
+ ev->variant.async_vmcs.ip != decoder->ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_stop:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_exstop:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_mwait:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_ptwrite:
+ /* Any event binding to the current PTWRITE instruction is
+ * handled in pt_insn_check_insn_event().
+ *
+ * Any subsequent ptwrite event binds to a different instruction
+ * and must wait until the next iteration - as long as tracing
+ * is enabled.
+ *
+ * When tracing is disabled, we forward all ptwrite events
+ * immediately to the user.
+ */
+ if (decoder->enabled)
+ break;
+
+ return pt_insn_status(decoder, pts_event_pending);
+
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ return pt_insn_status(decoder, pts_event_pending);
+ }
+
+ return pt_insn_status(decoder, 0);
+}
+
+static inline int insn_to_user(struct pt_insn *uinsn, size_t size,
+ const struct pt_insn *insn)
+{
+ if (!uinsn || !insn)
+ return -pte_internal;
+
+ if (uinsn == insn)
+ return 0;
+
+	/* Zero out any unknown bytes.
+	 *
+	 * Cast to uint8_t * so the offset is applied in bytes; adding the
+	 * offset to the struct pointer directly would scale it by
+	 * sizeof(*uinsn).
+	 */
+	if (sizeof(*insn) < size) {
+		memset((uint8_t *) uinsn + sizeof(*insn), 0,
+		       size - sizeof(*insn));
+
+		size = sizeof(*insn);
+	}
+
+ memcpy(uinsn, insn, size);
+
+ return 0;
+}
+
+static int pt_insn_decode_cached(struct pt_insn_decoder *decoder,
+ const struct pt_mapped_section *msec,
+ struct pt_insn *insn, struct pt_insn_ext *iext)
+{
+ int status;
+
+ if (!decoder || !insn || !iext)
+ return -pte_internal;
+
+ /* Try reading the memory containing @insn from the cached section. If
+ * that fails, if we don't have a cached section, or if decode fails
+ * later on, fall back to decoding @insn from @decoder->image.
+ *
+ * The latter will also handle truncated instructions that cross section
+ * boundaries.
+ */
+
+ if (!msec)
+ return pt_insn_decode(insn, iext, decoder->image,
+ &decoder->asid);
+
+ status = pt_msec_read(msec, insn->raw, sizeof(insn->raw), insn->ip);
+ if (status < 0) {
+ if (status != -pte_nomap)
+ return status;
+
+ return pt_insn_decode(insn, iext, decoder->image,
+ &decoder->asid);
+ }
+
+ /* We initialize @insn->size to the maximal possible size. It will be
+ * set to the actual size during instruction decode.
+ */
+ insn->size = (uint8_t) status;
+
+ status = pt_ild_decode(insn, iext);
+ if (status < 0) {
+ if (status != -pte_bad_insn)
+ return status;
+
+ return pt_insn_decode(insn, iext, decoder->image,
+ &decoder->asid);
+ }
+
+ return status;
+}
+
+static int pt_insn_msec_lookup(struct pt_insn_decoder *decoder,
+ const struct pt_mapped_section **pmsec)
+{
+ struct pt_msec_cache *scache;
+ struct pt_image *image;
+ uint64_t ip;
+ int isid;
+
+ if (!decoder || !pmsec)
+ return -pte_internal;
+
+ scache = &decoder->scache;
+ image = decoder->image;
+ ip = decoder->ip;
+
+ isid = pt_msec_cache_read(scache, pmsec, image, ip);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ return pt_msec_cache_fill(scache, pmsec, image,
+ &decoder->asid, ip);
+ }
+
+ return isid;
+}
+
+int pt_insn_next(struct pt_insn_decoder *decoder, struct pt_insn *uinsn,
+ size_t size)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_insn_ext iext;
+ struct pt_insn insn, *pinsn;
+ int status, isid;
+
+ if (!uinsn || !decoder)
+ return -pte_invalid;
+
+ /* Tracing must be enabled.
+ *
+	 * If it isn't, we should be processing events until we either run
+	 * out of trace or process a tracing-enabled event.
+ */
+ if (!decoder->enabled) {
+ if (decoder->status & pts_eos)
+ return -pte_eos;
+
+ return -pte_no_enable;
+ }
+
+ pinsn = size == sizeof(insn) ? uinsn : &insn;
+
+ /* Zero-initialize the instruction in case of error returns. */
+ memset(pinsn, 0, sizeof(*pinsn));
+
+ /* Fill in a few things from the current decode state.
+ *
+ * This reflects the state of the last pt_insn_next(), pt_insn_event()
+ * or pt_insn_start() call.
+ */
+ if (decoder->speculative)
+ pinsn->speculative = 1;
+ pinsn->ip = decoder->ip;
+ pinsn->mode = decoder->mode;
+
+ isid = pt_insn_msec_lookup(decoder, &msec);
+ if (isid < 0) {
+ if (isid != -pte_nomap)
+ return isid;
+
+ msec = NULL;
+ }
+
+ /* We set an incorrect isid if @msec is NULL. This will be corrected
+ * when we read the memory from the image later on.
+ */
+ pinsn->isid = isid;
+
+ status = pt_insn_decode_cached(decoder, msec, pinsn, &iext);
+ if (status < 0) {
+ /* Provide the incomplete instruction - the IP and mode fields
+ * are valid and may help diagnose the error.
+ */
+ (void) insn_to_user(uinsn, size, pinsn);
+ return status;
+ }
+
+ /* Provide the decoded instruction to the user. It won't change during
+ * event processing.
+ */
+ status = insn_to_user(uinsn, size, pinsn);
+ if (status < 0)
+ return status;
+
+ /* Check for events that bind to the current instruction.
+ *
+ * If an event is indicated, we're done.
+ */
+ status = pt_insn_check_insn_event(decoder, pinsn, &iext);
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ if (status & pts_event_pending)
+ return status;
+ }
+
+ /* Determine the next instruction's IP. */
+ status = pt_insn_proceed(decoder, pinsn, &iext);
+ if (status < 0)
+ return status;
+
+ /* Indicate events that bind to the new IP.
+ *
+ * Although we only look at the IP for binding events, we pass the
+ * decoded instruction in order to handle errata.
+ */
+ return pt_insn_check_ip_event(decoder, pinsn, &iext);
+}
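+
+/* Example: the expected calling convention (an illustrative sketch).
+ * Whenever pts_event_pending is indicated, events must be drained via
+ * pt_insn_event() before asking for the next instruction:
+ *
+ *	status = pt_insn_sync_forward(decoder);
+ *	while (status >= 0) {
+ *		struct pt_insn insn;
+ *
+ *		while (status & pts_event_pending) {
+ *			struct pt_event ev;
+ *
+ *			status = pt_insn_event(decoder, &ev, sizeof(ev));
+ *			if (status < 0)
+ *				break;
+ *		}
+ *		if (status < 0)
+ *			break;
+ *
+ *		status = pt_insn_next(decoder, &insn, sizeof(insn));
+ *	}
+ */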
+
+static int pt_insn_process_enabled(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must have an IP in order to start decoding. */
+ if (ev->ip_suppressed)
+ return -pte_noip;
+
+ /* We must currently be disabled. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->ip = ev->variant.enabled.ip;
+ decoder->enabled = 1;
+
+ return 0;
+}
+
+static int pt_insn_process_disabled(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* We must currently be enabled. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ /* We preserve @decoder->ip. This is where we expect tracing to resume
+ * and we'll indicate that on the subsequent enabled event if tracing
+ * actually does resume from there.
+ */
+ decoder->enabled = 0;
+
+ return 0;
+}
+
+static int pt_insn_process_async_branch(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* Tracing must be enabled in order to make sense of the event. */
+ if (!decoder->enabled)
+ return -pte_bad_context;
+
+ decoder->ip = ev->variant.async_branch.to;
+
+ return 0;
+}
+
+static int pt_insn_process_paging(struct pt_insn_decoder *decoder)
+{
+ uint64_t cr3;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ cr3 = decoder->event.variant.paging.cr3;
+ if (decoder->asid.cr3 != cr3) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.cr3 = cr3;
+ }
+
+ return 0;
+}
+
+static int pt_insn_process_overflow(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* If the IP is suppressed, the overflow resolved while tracing was
+ * disabled. Otherwise it resolved while tracing was enabled.
+ */
+ if (ev->ip_suppressed) {
+ /* Tracing is disabled.
+ *
+		 * It doesn't make sense to preserve the previous IP; it would
+		 * only be misleading. Even if tracing had already been
+		 * disabled before, we might have missed the re-enable in the
+		 * overflow.
+ */
+ decoder->enabled = 0;
+ decoder->ip = 0ull;
+ } else {
+ /* Tracing is enabled and we're at the IP at which the overflow
+ * resolved.
+ */
+ decoder->ip = ev->variant.overflow.ip;
+ decoder->enabled = 1;
+ }
+
+ /* We don't know the TSX state. Let's assume we execute normally.
+ *
+ * We also don't know the execution mode. Let's keep what we have
+ * in case we don't get an update before we have to decode the next
+ * instruction.
+ */
+ decoder->speculative = 0;
+
+ return 0;
+}
+
+static int pt_insn_process_exec_mode(struct pt_insn_decoder *decoder)
+{
+ enum pt_exec_mode mode;
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+ mode = ev->variant.exec_mode.mode;
+
+ /* Use status update events to diagnose inconsistencies. */
+ if (ev->status_update && decoder->enabled &&
+ decoder->mode != ptem_unknown && decoder->mode != mode)
+ return -pte_bad_status_update;
+
+ decoder->mode = mode;
+
+ return 0;
+}
+
+static int pt_insn_process_tsx(struct pt_insn_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->speculative = decoder->event.variant.tsx.speculative;
+
+ return 0;
+}
+
+static int pt_insn_process_stop(struct pt_insn_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = &decoder->event;
+
+ /* This event can't be a status update. */
+ if (ev->status_update)
+ return -pte_bad_context;
+
+ /* Tracing is always disabled before it is stopped. */
+ if (decoder->enabled)
+ return -pte_bad_context;
+
+ return 0;
+}
+
+static int pt_insn_process_vmcs(struct pt_insn_decoder *decoder)
+{
+ uint64_t vmcs;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ vmcs = decoder->event.variant.vmcs.base;
+ if (decoder->asid.vmcs != vmcs) {
+ errcode = pt_msec_cache_invalidate(&decoder->scache);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->asid.vmcs = vmcs;
+ }
+
+ return 0;
+}
+
+int pt_insn_event(struct pt_insn_decoder *decoder, struct pt_event *uevent,
+ size_t size)
+{
+ struct pt_event *ev;
+ int status;
+
+ if (!decoder || !uevent)
+ return -pte_invalid;
+
+ /* We must currently process an event. */
+ if (!decoder->process_event)
+ return -pte_bad_query;
+
+ ev = &decoder->event;
+ switch (ev->type) {
+ default:
+ /* This is not a user event.
+ *
+ * We either indicated it wrongly or the user called
+ * pt_insn_event() without a pts_event_pending indication.
+ */
+ return -pte_bad_query;
+
+ case ptev_enabled:
+ /* Indicate that tracing resumes from the IP at which tracing
+ * had been disabled before (with some special treatment for
+ * calls).
+ */
+ if (decoder->ip == ev->variant.enabled.ip)
+ ev->variant.enabled.resumed = 1;
+
+ status = pt_insn_process_enabled(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_disabled:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_disabled.at)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_disabled:
+ status = pt_insn_process_disabled(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_branch:
+ if (decoder->ip != ev->variant.async_branch.from)
+ return -pte_bad_query;
+
+ status = pt_insn_process_async_branch(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_paging:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_paging.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_paging:
+ status = pt_insn_process_paging(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_async_vmcs:
+ if (!ev->ip_suppressed &&
+ decoder->ip != ev->variant.async_vmcs.ip)
+ return -pte_bad_query;
+
+ fallthrough;
+ case ptev_vmcs:
+ status = pt_insn_process_vmcs(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_overflow:
+ status = pt_insn_process_overflow(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exec_mode:
+ status = pt_insn_process_exec_mode(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_tsx:
+ status = pt_insn_process_tsx(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_stop:
+ status = pt_insn_process_stop(decoder);
+ if (status < 0)
+ return status;
+
+ break;
+
+ case ptev_exstop:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.exstop.ip)
+ return -pte_bad_query;
+
+ break;
+
+ case ptev_mwait:
+ if (!ev->ip_suppressed && decoder->enabled &&
+ decoder->ip != ev->variant.mwait.ip)
+ return -pte_bad_query;
+
+ break;
+
+ case ptev_pwre:
+ case ptev_pwrx:
+ case ptev_ptwrite:
+ case ptev_tick:
+ case ptev_cbr:
+ case ptev_mnt:
+ break;
+ }
+
+ /* Copy the event to the user. Make sure we're not writing beyond the
+ * memory provided by the user.
+ *
+ * We might truncate details of an event but only for those events the
+ * user can't know about, anyway.
+ */
+ if (sizeof(*ev) < size)
+ size = sizeof(*ev);
+
+ memcpy(uevent, ev, size);
+
+ /* This completes processing of the current event. */
+ decoder->process_event = 0;
+
+ /* If we just handled an instruction event, check for further events
+ * that bind to this instruction.
+ *
+ * If we don't have further events, proceed beyond the instruction so we
+ * can check for IP events, as well.
+ */
+ if (decoder->process_insn) {
+ status = pt_insn_check_insn_event(decoder, &decoder->insn,
+ &decoder->iext);
+
+ if (status != 0) {
+ if (status < 0)
+ return status;
+
+ if (status & pts_event_pending)
+ return status;
+ }
+
+ /* Proceed to the next instruction. */
+ status = pt_insn_proceed_postponed(decoder);
+ if (status < 0)
+ return status;
+ }
+
+ /* Indicate further events that bind to the same IP. */
+ return pt_insn_check_ip_event(decoder, NULL, NULL);
+}
diff --git a/contrib/processor-trace/libipt/src/pt_last_ip.c b/contrib/processor-trace/libipt/src/pt_last_ip.c
new file mode 100644
index 0000000000000..3c98c9c3c95a3
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_last_ip.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_last_ip.h"
+
+#include "intel-pt.h"
+
+
+void pt_last_ip_init(struct pt_last_ip *last_ip)
+{
+ if (!last_ip)
+ return;
+
+ last_ip->ip = 0ull;
+ last_ip->have_ip = 0;
+ last_ip->suppressed = 0;
+}
+
+int pt_last_ip_query(uint64_t *ip, const struct pt_last_ip *last_ip)
+{
+ if (!last_ip)
+ return -pte_internal;
+
+ if (!last_ip->have_ip) {
+ if (ip)
+ *ip = 0ull;
+ return -pte_noip;
+ }
+
+ if (last_ip->suppressed) {
+ if (ip)
+ *ip = 0ull;
+ return -pte_ip_suppressed;
+ }
+
+ if (ip)
+ *ip = last_ip->ip;
+
+ return 0;
+}
+
+/* Sign-extend a uint64_t value. */
+static uint64_t sext(uint64_t val, uint8_t sign)
+{
+ uint64_t signbit, mask;
+
+ signbit = 1ull << (sign - 1);
+ mask = ~0ull << sign;
+
+ return val & signbit ? val | mask : val & ~mask;
+}
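+
+/* Example: sext(0x800000000000ull, 48) yields 0xffff800000000000ull;
+ * sext(0x7fffffffffffull, 48) yields 0x00007fffffffffffull.
+ */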
+
+int pt_last_ip_update_ip(struct pt_last_ip *last_ip,
+ const struct pt_packet_ip *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!last_ip || !packet)
+ return -pte_internal;
+
+ switch (packet->ipc) {
+ case pt_ipc_suppressed:
+ last_ip->suppressed = 1;
+ return 0;
+
+ case pt_ipc_sext_48:
+ last_ip->ip = sext(packet->ip, 48);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_update_16:
+ last_ip->ip = (last_ip->ip & ~0xffffull)
+ | (packet->ip & 0xffffull);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_update_32:
+ last_ip->ip = (last_ip->ip & ~0xffffffffull)
+ | (packet->ip & 0xffffffffull);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_update_48:
+ last_ip->ip = (last_ip->ip & ~0xffffffffffffull)
+ | (packet->ip & 0xffffffffffffull);
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+
+ case pt_ipc_full:
+ last_ip->ip = packet->ip;
+ last_ip->have_ip = 1;
+ last_ip->suppressed = 0;
+ return 0;
+ }
+
+ return -pte_bad_packet;
+}
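+
+/* Example (illustrative values): with last_ip->ip == 0xffff800000001000,
+ * a pt_ipc_update_16 packet carrying 0x2008 updates the low 16 bits:
+ *
+ *	(0xffff800000001000 & ~0xffff) | 0x2008 == 0xffff800000002008
+ */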
diff --git a/contrib/processor-trace/libipt/src/pt_msec_cache.c b/contrib/processor-trace/libipt/src/pt_msec_cache.c
new file mode 100644
index 0000000000000..da946e0552d2a
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_msec_cache.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_msec_cache.h"
+#include "pt_section.h"
+#include "pt_image.h"
+
+#include <string.h>
+
+
+int pt_msec_cache_init(struct pt_msec_cache *cache)
+{
+ if (!cache)
+ return -pte_internal;
+
+ memset(cache, 0, sizeof(*cache));
+
+ return 0;
+}
+
+void pt_msec_cache_fini(struct pt_msec_cache *cache)
+{
+ if (!cache)
+ return;
+
+ (void) pt_msec_cache_invalidate(cache);
+ pt_msec_fini(&cache->msec);
+}
+
+int pt_msec_cache_invalidate(struct pt_msec_cache *cache)
+{
+ struct pt_section *section;
+ int errcode;
+
+ if (!cache)
+ return -pte_internal;
+
+ section = pt_msec_section(&cache->msec);
+ if (!section)
+ return 0;
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+
+ cache->msec.section = NULL;
+
+ return pt_section_put(section);
+}
+
+int pt_msec_cache_read(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image, uint64_t vaddr)
+{
+ struct pt_mapped_section *msec;
+ int isid, errcode;
+
+ if (!cache || !pmsec)
+ return -pte_internal;
+
+ msec = &cache->msec;
+ isid = cache->isid;
+
+ errcode = pt_image_validate(image, msec, vaddr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ *pmsec = msec;
+
+ return isid;
+}
+
+int pt_msec_cache_fill(struct pt_msec_cache *cache,
+ const struct pt_mapped_section **pmsec,
+ struct pt_image *image, const struct pt_asid *asid,
+ uint64_t vaddr)
+{
+ struct pt_mapped_section *msec;
+ struct pt_section *section;
+ int errcode, isid;
+
+ if (!cache || !pmsec)
+ return -pte_internal;
+
+ errcode = pt_msec_cache_invalidate(cache);
+ if (errcode < 0)
+ return errcode;
+
+ msec = &cache->msec;
+
+ isid = pt_image_find(image, msec, asid, vaddr);
+ if (isid < 0)
+ return isid;
+
+ section = pt_msec_section(msec);
+
+ errcode = pt_section_map(section);
+ if (errcode < 0) {
+ (void) pt_section_put(section);
+ msec->section = NULL;
+
+ return errcode;
+ }
+
+ *pmsec = msec;
+
+ cache->isid = isid;
+
+ return isid;
+}
diff --git a/contrib/processor-trace/libipt/src/pt_packet.c b/contrib/processor-trace/libipt/src/pt_packet.c
new file mode 100644
index 0000000000000..e237427ec127e
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_packet.c
@@ -0,0 +1,573 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_packet.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <limits.h>
+
+
+static uint64_t pt_pkt_read_value(const uint8_t *pos, int size)
+{
+ uint64_t val;
+ int idx;
+
+ for (val = 0, idx = 0; idx < size; ++idx) {
+ uint64_t byte = *pos++;
+
+ byte <<= (idx * 8);
+ val |= byte;
+ }
+
+ return val;
+}
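+
+/* Example: pt_pkt_read_value() assembles values in little-endian byte
+ * order; for the byte sequence 0x78 0x56 0x34 0x12 and size 4, it
+ * returns 0x12345678.
+ */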
+
+int pt_pkt_read_unknown(struct pt_packet *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ int (*decode)(struct pt_packet_unknown *, const struct pt_config *,
+ const uint8_t *, void *);
+ int size;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ decode = config->decode.callback;
+ if (!decode)
+ return -pte_bad_opc;
+
+ /* Fill in some default values. */
+ packet->payload.unknown.packet = pos;
+ packet->payload.unknown.priv = NULL;
+
+ /* We accept a size of zero to allow the callback to modify the
+ * trace buffer and resume normal decoding.
+ */
+ size = (*decode)(&packet->payload.unknown, config, pos,
+ config->decode.context);
+ if (size < 0)
+ return size;
+
+ if (size > UCHAR_MAX)
+ return -pte_invalid;
+
+ packet->type = ppt_unknown;
+ packet->size = (uint8_t) size;
+
+ if (config->end < pos + size)
+ return -pte_eos;
+
+ return size;
+}
+
+int pt_pkt_read_psb(const uint8_t *pos, const struct pt_config *config)
+{
+ int count;
+
+ if (!pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_psb)
+ return -pte_eos;
+
+ pos += pt_opcs_psb;
+
+ for (count = 0; count < pt_psb_repeat_count; ++count) {
+ if (*pos++ != pt_psb_hi)
+ return -pte_bad_packet;
+ if (*pos++ != pt_psb_lo)
+ return -pte_bad_packet;
+ }
+
+ return ptps_psb;
+}
+
+static int pt_pkt_ip_size(enum pt_ip_compression ipc)
+{
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ return 0;
+
+ case pt_ipc_update_16:
+ return 2;
+
+ case pt_ipc_update_32:
+ return 4;
+
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ return 6;
+
+ case pt_ipc_full:
+ return 8;
+ }
+
+ return -pte_bad_packet;
+}
+
+int pt_pkt_read_ip(struct pt_packet_ip *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t ip;
+ uint8_t ipc;
+ int ipsize;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ ipc = (*pos++ >> pt_opm_ipc_shr) & pt_opm_ipc_shr_mask;
+
+ ip = 0ull;
+ ipsize = pt_pkt_ip_size((enum pt_ip_compression) ipc);
+ if (ipsize < 0)
+ return ipsize;
+
+ if (config->end < pos + ipsize)
+ return -pte_eos;
+
+ if (ipsize)
+ ip = pt_pkt_read_value(pos, ipsize);
+
+ packet->ipc = (enum pt_ip_compression) ipc;
+ packet->ip = ip;
+
+ return ipsize + 1;
+}
+
+static uint8_t pt_pkt_tnt_bit_size(uint64_t payload)
+{
+ uint8_t size;
+
+ /* The payload bit-size is the bit-index of the payload's stop-bit,
+ * which itself is not part of the payload proper.
+ */
+ for (size = 0; ; size += 1) {
+ payload >>= 1;
+ if (!payload)
+ break;
+ }
+
+ return size;
+}
+
+static int pt_pkt_read_tnt(struct pt_packet_tnt *packet, uint64_t payload)
+{
+ uint8_t bit_size;
+
+ if (!packet)
+ return -pte_internal;
+
+ bit_size = pt_pkt_tnt_bit_size(payload);
+ if (!bit_size)
+ return -pte_bad_packet;
+
+ /* Remove the stop bit from the payload. */
+ payload &= ~(1ull << bit_size);
+
+ packet->payload = payload;
+ packet->bit_size = bit_size;
+
+ return 0;
+}
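+
+/* Example (illustrative value): a TNT payload of 0x2d (0b101101) has its
+ * stop bit at bit index 5, so bit_size is 5 and the payload proper is
+ * 0b01101 - five taken/not-taken bits.
+ */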
+
+int pt_pkt_read_tnt_8(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ (void) config;
+
+ if (!pos)
+ return -pte_internal;
+
+ errcode = pt_pkt_read_tnt(packet, pos[0] >> pt_opm_tnt_8_shr);
+ if (errcode < 0)
+ return errcode;
+
+ return ptps_tnt_8;
+}
+
+int pt_pkt_read_tnt_64(struct pt_packet_tnt *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+ int errcode;
+
+ if (!pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_tnt_64)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_tnt_64, pt_pl_tnt_64_size);
+
+ errcode = pt_pkt_read_tnt(packet, payload);
+ if (errcode < 0)
+ return errcode;
+
+ return ptps_tnt_64;
+}
+
+int pt_pkt_read_pip(struct pt_packet_pip *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_pip)
+ return -pte_eos;
+
+ /* Read the payload. */
+ payload = pt_pkt_read_value(pos + pt_opcs_pip, pt_pl_pip_size);
+
+ /* Extract the non-root information from the payload. */
+ packet->nr = payload & pt_pl_pip_nr;
+
+ /* Create the cr3 value. */
+ payload >>= pt_pl_pip_shr;
+ payload <<= pt_pl_pip_shl;
+ packet->cr3 = payload;
+
+ return ptps_pip;
+}
+
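+/* Extract the MODE.EXEC bits.
+ *
+ * Following the usual CS.L/CS.D semantics, csl=1/csd=0 indicates 64-bit
+ * mode, csl=0/csd=1 indicates 32-bit mode, and csl=0/csd=0 indicates
+ * 16-bit mode.
+ */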
+static int pt_pkt_read_mode_exec(struct pt_packet_mode_exec *packet,
+ uint8_t mode)
+{
+ if (!packet)
+ return -pte_internal;
+
+ packet->csl = (mode & pt_mob_exec_csl) != 0;
+ packet->csd = (mode & pt_mob_exec_csd) != 0;
+
+ return ptps_mode;
+}
+
+static int pt_pkt_read_mode_tsx(struct pt_packet_mode_tsx *packet,
+ uint8_t mode)
+{
+ if (!packet)
+ return -pte_internal;
+
+ packet->intx = (mode & pt_mob_tsx_intx) != 0;
+ packet->abrt = (mode & pt_mob_tsx_abrt) != 0;
+
+ return ptps_mode;
+}
+
+int pt_pkt_read_mode(struct pt_packet_mode *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint8_t payload, mode, leaf;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mode)
+ return -pte_eos;
+
+ payload = pos[pt_opcs_mode];
+ leaf = payload & pt_mom_leaf;
+ mode = payload & pt_mom_bits;
+
+ packet->leaf = (enum pt_mode_leaf) leaf;
+ switch (leaf) {
+ default:
+ return -pte_bad_packet;
+
+ case pt_mol_exec:
+ return pt_pkt_read_mode_exec(&packet->bits.exec, mode);
+
+ case pt_mol_tsx:
+ return pt_pkt_read_mode_tsx(&packet->bits.tsx, mode);
+ }
+}
+
+int pt_pkt_read_tsc(struct pt_packet_tsc *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_tsc)
+ return -pte_eos;
+
+ packet->tsc = pt_pkt_read_value(pos + pt_opcs_tsc, pt_pl_tsc_size);
+
+ return ptps_tsc;
+}
+
+int pt_pkt_read_cbr(struct pt_packet_cbr *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_cbr)
+ return -pte_eos;
+
+ packet->ratio = pos[2];
+
+ return ptps_cbr;
+}
+
+int pt_pkt_read_tma(struct pt_packet_tma *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint16_t ctc, fc;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_tma)
+ return -pte_eos;
+
+ ctc = pos[pt_pl_tma_ctc_0];
+ ctc |= pos[pt_pl_tma_ctc_1] << 8;
+
+ fc = pos[pt_pl_tma_fc_0];
+ fc |= pos[pt_pl_tma_fc_1] << 8;
+
+ if (fc & ~pt_pl_tma_fc_mask)
+ return -pte_bad_packet;
+
+ packet->ctc = ctc;
+ packet->fc = fc;
+
+ return ptps_tma;
+}
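+
+/* Editor's note: a small worked example for pt_pkt_read_tma() above. Both
+ * fields are assembled from little-endian byte pairs; given ctc bytes 0x78
+ * and 0x56 and fc bytes 0x34 and 0x01 at their respective payload offsets:
+ *
+ *	ctc = 0x78 | (0x56 << 8) = 0x5678
+ *	fc  = 0x34 | (0x01 << 8) = 0x0134
+ *
+ * Any fc bits outside pt_pl_tma_fc_mask indicate a corrupt packet and are
+ * rejected with -pte_bad_packet.
+ */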
+
+int pt_pkt_read_mtc(struct pt_packet_mtc *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mtc)
+ return -pte_eos;
+
+ packet->ctc = pos[pt_opcs_mtc];
+
+ return ptps_mtc;
+}
+
+int pt_pkt_read_cyc(struct pt_packet_cyc *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ uint64_t value;
+ uint8_t cyc, ext, shl;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ begin = pos;
+ end = config->end;
+
+ /* The first byte contains the opcode and part of the payload.
+ * We already checked that this first byte is within bounds.
+ */
+ cyc = *pos++;
+
+ ext = cyc & pt_opm_cyc_ext;
+ cyc >>= pt_opm_cyc_shr;
+
+ value = cyc;
+ shl = (8 - pt_opm_cyc_shr);
+
+ while (ext) {
+ uint64_t bits;
+
+ if (end <= pos)
+ return -pte_eos;
+
+ bits = *pos++;
+ ext = bits & pt_opm_cycx_ext;
+
+ bits >>= pt_opm_cycx_shr;
+ bits <<= shl;
+
+ shl += (8 - pt_opm_cycx_shr);
+ if (sizeof(value) * 8 < shl)
+ return -pte_bad_packet;
+
+ value |= bits;
+ }
+
+ packet->value = value;
+
+ return (int) (pos - begin);
+}
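+
+/* Editor's note: a worked example for the variable-length CYC decoding
+ * above, assuming the usual layout (two opcode bits, one extension bit,
+ * and five payload bits in the first byte; one extension bit and seven
+ * payload bits in each extension byte). For the two bytes 0x0f 0x06:
+ *
+ *	byte 0: 0x0f - ext set, value = 0x0f >> pt_opm_cyc_shr = 1, shl = 5
+ *	byte 1: 0x06 - ext clear, bits = 0x06 >> pt_opm_cycx_shr = 3
+ *
+ *	value = 1 | (3 << 5) = 97 cycles, packet size 2
+ *
+ * The shl check above rejects payloads that would overflow the 64-bit
+ * value with -pte_bad_packet.
+ */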
+
+int pt_pkt_read_vmcs(struct pt_packet_vmcs *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_vmcs)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_vmcs, pt_pl_vmcs_size);
+
+ packet->base = payload << pt_pl_vmcs_shl;
+
+ return ptps_vmcs;
+}
+
+int pt_pkt_read_mnt(struct pt_packet_mnt *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mnt)
+ return -pte_eos;
+
+ packet->payload = pt_pkt_read_value(pos + pt_opcs_mnt, pt_pl_mnt_size);
+
+ return ptps_mnt;
+}
+
+int pt_pkt_read_exstop(struct pt_packet_exstop *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_exstop)
+ return -pte_eos;
+
+ packet->ip = pos[1] & pt_pl_exstop_ip_mask ? 1 : 0;
+
+ return ptps_exstop;
+}
+
+int pt_pkt_read_mwait(struct pt_packet_mwait *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_mwait)
+ return -pte_eos;
+
+ packet->hints = (uint32_t) pt_pkt_read_value(pos + pt_opcs_mwait,
+ pt_pl_mwait_hints_size);
+ packet->ext = (uint32_t) pt_pkt_read_value(pos + pt_opcs_mwait +
+ pt_pl_mwait_hints_size,
+ pt_pl_mwait_ext_size);
+ return ptps_mwait;
+}
+
+int pt_pkt_read_pwre(struct pt_packet_pwre *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_pwre)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_pwre, pt_pl_pwre_size);
+
+ memset(packet, 0, sizeof(*packet));
+ packet->state = (uint8_t) ((payload & pt_pl_pwre_state_mask) >>
+ pt_pl_pwre_state_shr);
+ packet->sub_state = (uint8_t) ((payload & pt_pl_pwre_sub_state_mask) >>
+ pt_pl_pwre_sub_state_shr);
+ if (payload & pt_pl_pwre_hw_mask)
+ packet->hw = 1;
+
+ return ptps_pwre;
+}
+
+int pt_pkt_read_pwrx(struct pt_packet_pwrx *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint64_t payload;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ if (config->end < pos + ptps_pwrx)
+ return -pte_eos;
+
+ payload = pt_pkt_read_value(pos + pt_opcs_pwrx, pt_pl_pwrx_size);
+
+ memset(packet, 0, sizeof(*packet));
+ packet->last = (uint8_t) ((payload & pt_pl_pwrx_last_mask) >>
+ pt_pl_pwrx_last_shr);
+ packet->deepest = (uint8_t) ((payload & pt_pl_pwrx_deepest_mask) >>
+ pt_pl_pwrx_deepest_shr);
+ if (payload & pt_pl_pwrx_wr_int)
+ packet->interrupt = 1;
+ if (payload & pt_pl_pwrx_wr_store)
+ packet->store = 1;
+ if (payload & pt_pl_pwrx_wr_hw)
+ packet->autonomous = 1;
+
+ return ptps_pwrx;
+}
+
+int pt_pkt_read_ptw(struct pt_packet_ptw *packet, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ uint8_t opc, plc;
+ int size;
+
+ if (!packet || !pos || !config)
+ return -pte_internal;
+
+ /* Skip the ext opcode. */
+ pos++;
+
+ opc = *pos++;
+ plc = (opc >> pt_opm_ptw_pb_shr) & pt_opm_ptw_pb_shr_mask;
+
+ size = pt_ptw_size(plc);
+ if (size < 0)
+ return size;
+
+ if (config->end < pos + size)
+ return -pte_eos;
+
+ packet->payload = pt_pkt_read_value(pos, size);
+ packet->plc = plc;
+ packet->ip = opc & pt_opm_ptw_ip ? 1 : 0;
+
+ return pt_opcs_ptw + size;
+}
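+
+/* Editor's note: pt_pkt_read_ptw() above sizes the packet dynamically: two
+ * opcode bytes (the ext opcode byte that is skipped plus the byte carrying
+ * plc and the ip bit) followed by a payload whose size pt_ptw_size()
+ * derives from plc. With the four- and eight-byte PTWRITE payloads, the
+ * function returns pt_opcs_ptw + 4 = 6 or pt_opcs_ptw + 8 = 10.
+ */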
diff --git a/contrib/processor-trace/libipt/src/pt_packet_decoder.c b/contrib/processor-trace/libipt/src/pt_packet_decoder.c
new file mode 100644
index 0000000000000..bf1a1bd0843e7
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_packet_decoder.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_packet_decoder.h"
+#include "pt_decoder_function.h"
+#include "pt_packet.h"
+#include "pt_sync.h"
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include <string.h>
+#include <stdlib.h>
+
+
+int pt_pkt_decoder_init(struct pt_packet_decoder *decoder,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!decoder || !config)
+ return -pte_invalid;
+
+ memset(decoder, 0, sizeof(*decoder));
+
+ errcode = pt_config_from_user(&decoder->config, config);
+ if (errcode < 0)
+ return errcode;
+
+ return 0;
+}
+
+struct pt_packet_decoder *pt_pkt_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_packet_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_pkt_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_pkt_decoder_fini(struct pt_packet_decoder *decoder)
+{
+ (void) decoder;
+
+ /* Nothing to do. */
+}
+
+void pt_pkt_free_decoder(struct pt_packet_decoder *decoder)
+{
+ pt_pkt_decoder_fini(decoder);
+ free(decoder);
+}
+
+int pt_pkt_sync_forward(struct pt_packet_decoder *decoder)
+{
+ const uint8_t *pos, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ sync = decoder->sync;
+ pos = decoder->pos;
+ if (!pos)
+ pos = decoder->config.begin;
+
+ if (pos == sync)
+ pos += ptps_psb;
+
+ errcode = pt_sync_forward(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->sync = sync;
+ decoder->pos = sync;
+
+ return 0;
+}
+
+int pt_pkt_sync_backward(struct pt_packet_decoder *decoder)
+{
+ const uint8_t *pos, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pos = decoder->pos;
+ if (!pos)
+ pos = decoder->config.end;
+
+ errcode = pt_sync_backward(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->sync = sync;
+ decoder->pos = sync;
+
+ return 0;
+}
+
+int pt_pkt_sync_set(struct pt_packet_decoder *decoder, uint64_t offset)
+{
+ const uint8_t *begin, *end, *pos;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ end = decoder->config.end;
+ pos = begin + offset;
+
+ if (end < pos || pos < begin)
+ return -pte_eos;
+
+ decoder->sync = pos;
+ decoder->pos = pos;
+
+ return 0;
+}
+
+int pt_pkt_get_offset(const struct pt_packet_decoder *decoder, uint64_t *offset)
+{
+ const uint8_t *begin, *pos;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ pos = decoder->pos;
+
+ if (!pos)
+ return -pte_nosync;
+
+ *offset = pos - begin;
+ return 0;
+}
+
+int pt_pkt_get_sync_offset(const struct pt_packet_decoder *decoder,
+ uint64_t *offset)
+{
+ const uint8_t *begin, *sync;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ sync = decoder->sync;
+
+ if (!sync)
+ return -pte_nosync;
+
+ *offset = sync - begin;
+ return 0;
+}
+
+const struct pt_config *
+pt_pkt_get_config(const struct pt_packet_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return &decoder->config;
+}
+
+static inline int pkt_to_user(struct pt_packet *upkt, size_t size,
+ const struct pt_packet *pkt)
+{
+ if (!upkt || !pkt)
+ return -pte_internal;
+
+ if (upkt == pkt)
+ return 0;
+
+	/* Zero out any unknown bytes.
+	 *
+	 * The arithmetic must be in bytes, not in packet-sized units, so
+	 * cast before adding the offset.
+	 */
+	if (sizeof(*pkt) < size) {
+		memset((uint8_t *) upkt + sizeof(*pkt), 0,
+		       size - sizeof(*pkt));
+
+ size = sizeof(*pkt);
+ }
+
+ memcpy(upkt, pkt, size);
+
+ return 0;
+}
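+
+/* Editor's note: a usage note for pkt_to_user() above. Callers pass the
+ * size of their struct pt_packet so that binaries compiled against an
+ * older or newer layout keep working:
+ *
+ *	struct pt_packet packet;
+ *	int errcode;
+ *
+ *	errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ *
+ * If the user's struct is smaller than the library's, only that many bytes
+ * are copied; if it is larger, the bytes the library does not know about
+ * are zeroed first.
+ */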
+
+int pt_pkt_next(struct pt_packet_decoder *decoder, struct pt_packet *packet,
+ size_t psize)
+{
+ const struct pt_decoder_function *dfun;
+ struct pt_packet pkt, *ppkt;
+ int errcode, size;
+
+ if (!packet || !decoder)
+ return -pte_invalid;
+
+ ppkt = psize == sizeof(pkt) ? packet : &pkt;
+
+ errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ if (!dfun)
+ return -pte_internal;
+
+ if (!dfun->packet)
+ return -pte_internal;
+
+ size = dfun->packet(decoder, ppkt);
+ if (size < 0)
+ return size;
+
+ errcode = pkt_to_user(packet, psize, ppkt);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+
+ return size;
+}
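+
+/* Editor's note: a minimal end-to-end sketch of the packet decoder API (an
+ * illustration, not part of the import; @buffer and @size are assumed to
+ * describe a caller-provided raw trace buffer):
+ *
+ *	struct pt_packet_decoder *decoder;
+ *	struct pt_config config;
+ *	struct pt_packet packet;
+ *	int errcode;
+ *
+ *	memset(&config, 0, sizeof(config));
+ *	config.size = sizeof(config);
+ *	config.begin = buffer;
+ *	config.end = buffer + size;
+ *
+ *	decoder = pt_pkt_alloc_decoder(&config);
+ *	if (!decoder)
+ *		return;
+ *
+ *	errcode = pt_pkt_sync_forward(decoder);
+ *	while (errcode >= 0) {
+ *		errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ *		if (errcode < 0)
+ *			break;
+ *
+ *		... inspect packet.type and packet.payload ...
+ *	}
+ *
+ *	pt_pkt_free_decoder(decoder);
+ */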
+
+int pt_pkt_decode_unknown(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+	if (!decoder)
+		return -pte_internal;
+
+	return pt_pkt_read_unknown(packet, decoder->pos, &decoder->config);
+}
+
+int pt_pkt_decode_pad(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_pad;
+ packet->size = ptps_pad;
+
+ return ptps_pad;
+}
+
+int pt_pkt_decode_psb(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+	if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_psb(decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_psb;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tip(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tip;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tnt_8(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_8(&packet->payload.tnt, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tnt_8;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tnt_64(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_64(&packet->payload.tnt, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tnt_64;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tip_pge(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tip_pge;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tip_pgd;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_fup(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet->payload.ip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_fup;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_pip(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_pip(&packet->payload.pip, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_pip;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_ovf(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_ovf;
+ packet->size = ptps_ovf;
+
+ return ptps_ovf;
+}
+
+int pt_pkt_decode_mode(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mode(&packet->payload.mode, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mode;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_psbend(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_psbend;
+ packet->size = ptps_psbend;
+
+ return ptps_psbend;
+}
+
+int pt_pkt_decode_tsc(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tsc(&packet->payload.tsc, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tsc;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_cbr(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_cbr(&packet->payload.cbr, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_cbr;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_tma(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_tma(&packet->payload.tma, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_tma;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_mtc(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mtc(&packet->payload.mtc, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mtc;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_cyc(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_cyc(&packet->payload.cyc, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_cyc;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_stop(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ (void) decoder;
+
+ if (!packet)
+ return -pte_internal;
+
+ packet->type = ppt_stop;
+ packet->size = ptps_stop;
+
+ return ptps_stop;
+}
+
+int pt_pkt_decode_vmcs(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_vmcs(&packet->payload.vmcs, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_vmcs;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_mnt(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mnt(&packet->payload.mnt, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mnt;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_exstop(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_exstop(&packet->payload.exstop, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_exstop;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_mwait(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_mwait(&packet->payload.mwait, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_mwait;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_pwre(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwre(&packet->payload.pwre, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_pwre;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_pwrx(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwrx(&packet->payload.pwrx, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_pwrx;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
+
+int pt_pkt_decode_ptw(struct pt_packet_decoder *decoder,
+ struct pt_packet *packet)
+{
+ int size;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ size = pt_pkt_read_ptw(&packet->payload.ptw, decoder->pos,
+ &decoder->config);
+ if (size < 0)
+ return size;
+
+ packet->type = ppt_ptw;
+ packet->size = (uint8_t) size;
+
+ return size;
+}
diff --git a/contrib/processor-trace/libipt/src/pt_query_decoder.c b/contrib/processor-trace/libipt/src/pt_query_decoder.c
new file mode 100644
index 0000000000000..1941ae4d2f47a
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_query_decoder.c
@@ -0,0 +1,3630 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_query_decoder.h"
+#include "pt_sync.h"
+#include "pt_decoder_function.h"
+#include "pt_packet.h"
+#include "pt_packet_decoder.h"
+#include "pt_config.h"
+#include "pt_opcodes.h"
+#include "pt_compiler.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+
+
+/* Find a FUP in a PSB+ header.
+ *
+ * The packet decoder @decoder must be synchronized onto the trace stream
+ * at the beginning of or somewhere inside a PSB+ header.
+ *
+ * It uses @packet to hold trace packets during its search. If the search is
+ * successful, @packet will contain the first (and hopefully only) FUP packet in
+ * this PSB+. Otherwise, @packet may contain anything.
+ *
+ * Returns one if a FUP packet is found (@packet will contain it).
+ * Returns zero if no FUP packet is found (@packet is undefined).
+ * Returns a negative error code otherwise.
+ */
+static int pt_qry_find_header_fup(struct pt_packet *packet,
+ struct pt_packet_decoder *decoder)
+{
+ if (!packet || !decoder)
+ return -pte_internal;
+
+ for (;;) {
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, packet, sizeof(*packet));
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet->type) {
+ default:
+ /* Ignore the packet. */
+ break;
+
+ case ppt_psbend:
+ /* There's no FUP in here. */
+ return 0;
+
+ case ppt_fup:
+ /* Found it. */
+ return 1;
+ }
+ }
+}
+
+int pt_qry_decoder_init(struct pt_query_decoder *decoder,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ memset(decoder, 0, sizeof(*decoder));
+
+ errcode = pt_config_from_user(&decoder->config, config);
+ if (errcode < 0)
+ return errcode;
+
+ pt_last_ip_init(&decoder->ip);
+ pt_tnt_cache_init(&decoder->tnt);
+ pt_time_init(&decoder->time);
+ pt_time_init(&decoder->last_time);
+ pt_tcal_init(&decoder->tcal);
+ pt_evq_init(&decoder->evq);
+
+ return 0;
+}
+
+struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config)
+{
+ struct pt_query_decoder *decoder;
+ int errcode;
+
+ decoder = malloc(sizeof(*decoder));
+ if (!decoder)
+ return NULL;
+
+ errcode = pt_qry_decoder_init(decoder, config);
+ if (errcode < 0) {
+ free(decoder);
+ return NULL;
+ }
+
+ return decoder;
+}
+
+void pt_qry_decoder_fini(struct pt_query_decoder *decoder)
+{
+ (void) decoder;
+
+ /* Nothing to do. */
+}
+
+void pt_qry_free_decoder(struct pt_query_decoder *decoder)
+{
+ pt_qry_decoder_fini(decoder);
+ free(decoder);
+}
+
+static void pt_qry_reset(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return;
+
+ decoder->enabled = 0;
+ decoder->consume_packet = 0;
+ decoder->event = NULL;
+
+ pt_last_ip_init(&decoder->ip);
+ pt_tnt_cache_init(&decoder->tnt);
+ pt_time_init(&decoder->time);
+ pt_time_init(&decoder->last_time);
+ pt_tcal_init(&decoder->tcal);
+ pt_evq_init(&decoder->evq);
+}
+
+static int pt_qry_will_event(const struct pt_query_decoder *decoder)
+{
+ const struct pt_decoder_function *dfun;
+
+ if (!decoder)
+ return -pte_internal;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return 0;
+
+ if (dfun->flags & pdff_event)
+ return 1;
+
+ if (dfun->flags & pdff_psbend)
+ return pt_evq_pending(&decoder->evq, evb_psbend);
+
+ if (dfun->flags & pdff_tip)
+ return pt_evq_pending(&decoder->evq, evb_tip);
+
+ if (dfun->flags & pdff_fup)
+ return pt_evq_pending(&decoder->evq, evb_fup);
+
+ return 0;
+}
+
+static int pt_qry_will_eos(const struct pt_query_decoder *decoder)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ dfun = decoder->next;
+ if (dfun)
+ return 0;
+
+ /* The decoding function may be NULL for two reasons:
+ *
+ * - we ran out of trace
+ * - we ran into a fetch error such as -pte_bad_opc
+ *
+ * Let's fetch again.
+ */
+ errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ return errcode == -pte_eos;
+}
+
+static int pt_qry_status_flags(const struct pt_query_decoder *decoder)
+{
+ int flags = 0;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Some packets force out TNT and any deferred TIPs in order to
+ * establish the correct context for the subsequent packet.
+ *
+ * Users are expected to first navigate to the correct code region
+ * by using up the cached TNT bits before interpreting any subsequent
+ * packets.
+ *
+ * We do need to read ahead in order to signal upcoming events. We may
+	 * have already decoded those packets before our user has navigated to
+	 * the correct code region.
+ *
+ * In order to have our user use up the cached TNT bits first, we do
+ * not indicate the next event until the TNT cache is empty.
+ */
+ if (pt_tnt_cache_is_empty(&decoder->tnt)) {
+ if (pt_qry_will_event(decoder))
+ flags |= pts_event_pending;
+
+ if (pt_qry_will_eos(decoder))
+ flags |= pts_eos;
+ }
+
+ return flags;
+}
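+
+/* Editor's note: a sketch of the query protocol described above (an
+ * illustration, not part of the import). After synchronizing, the user
+ * drains conditional branch results until the status flags announce an
+ * event or the end of the trace:
+ *
+ *	uint64_t ip;
+ *	int status, taken;
+ *
+ *	status = pt_qry_sync_forward(decoder, &ip);
+ *	while (status >= 0) {
+ *		if (status & pts_event_pending) {
+ *			struct pt_event event;
+ *
+ *			status = pt_qry_event(decoder, &event,
+ *					      sizeof(event));
+ *			continue;
+ *		}
+ *
+ *		if (status & pts_eos)
+ *			break;
+ *
+ *		status = pt_qry_cond_branch(decoder, &taken);
+ *	}
+ *
+ * A real user follows its own disassembly and calls pt_qry_cond_branch()
+ * at conditional branches and pt_qry_indirect_branch() at indirect
+ * branches; the TNT cache must be drained before events are reported, as
+ * explained above.
+ */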
+
+static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Repeat the decoder fetch to reproduce the error. */
+ errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* We must get some error or something's wrong. */
+ return -pte_internal;
+}
+
+static int pt_qry_read_ahead(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(&decoder->next, decoder->pos,
+ &decoder->config);
+ if (errcode)
+ return errcode;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return -pte_internal;
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* We're done once we reach
+ *
+		 * - a branching-related packet. */
+		if (dfun->flags & (pdff_tip | pdff_tnt))
+			return 0;
+
+		/* - an event-related packet. */
+ if (pt_qry_will_event(decoder))
+ return 0;
+
+ /* Decode status update packets. */
+ errcode = dfun->decode(decoder);
+ if (errcode) {
+ /* Ignore truncated status packets at the end.
+ *
+ * Move beyond the packet and clear @decoder->next to
+ * indicate that we were not able to fetch the next
+ * packet.
+ */
+ if (errcode == -pte_eos) {
+ decoder->pos = decoder->config.end;
+ decoder->next = NULL;
+ }
+
+ return errcode;
+ }
+ }
+}
+
+static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos,
+ uint64_t *addr)
+{
+ const struct pt_decoder_function *dfun;
+ int status, errcode;
+
+ if (!decoder || !pos)
+ return -pte_invalid;
+
+ pt_qry_reset(decoder);
+
+ decoder->sync = pos;
+ decoder->pos = pos;
+
+ errcode = pt_df_fetch(&decoder->next, pos, &decoder->config);
+ if (errcode)
+ return errcode;
+
+ dfun = decoder->next;
+
+ /* We do need to start at a PSB in order to initialize the state. */
+ if (dfun != &pt_decode_psb)
+ return -pte_nosync;
+
+ /* Decode the PSB+ header to initialize the state. */
+ errcode = dfun->decode(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Fill in the start address.
+ * We do this before reading ahead since the latter may read an
+ * adjacent PSB+ that might change the decoder's IP, causing us
+ * to skip code.
+ */
+ if (addr) {
+ status = pt_last_ip_query(addr, &decoder->ip);
+
+ /* Make sure we don't clobber it later on. */
+ if (!status)
+ addr = NULL;
+ }
+
+ /* Read ahead until the first query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* We return the current decoder status. */
+ status = pt_qry_status_flags(decoder);
+ if (status < 0)
+ return status;
+
+ errcode = pt_last_ip_query(addr, &decoder->ip);
+ if (errcode < 0) {
+ /* Indicate the missing IP in the status. */
+ if (addr)
+ status |= pts_ip_suppressed;
+ }
+
+ return status;
+}
+
+static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_tsc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_tsc(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_header_tsc(struct pt_time *time,
+ struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_header_tsc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_tsc(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_cbr(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_cbr(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_header_cbr(struct pt_time *time,
+ struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_header_cbr(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_cbr(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_tma *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_tma(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_tma(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_mtc *packet,
+ const struct pt_config *config)
+{
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_mtc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_mtc(time, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal,
+ const struct pt_packet_cyc *packet,
+ const struct pt_config *config)
+{
+ uint64_t fcr;
+ int errcode;
+
+ /* We ignore configuration errors. They will result in imprecise
+	 * calibration and hence imprecise cycle-accurate timing.
+ *
+ * We currently do not track them.
+ */
+ errcode = pt_tcal_update_cyc(tcal, packet, config);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ /* We need the FastCounter to Cycles ratio below. Fall back to
+	 * an invalid ratio of 0 if calibration has not yet kicked in.
+ *
+ * This will be tracked as packet loss in struct pt_time.
+ */
+ errcode = pt_tcal_fcr(&fcr, tcal);
+ if (errcode < 0) {
+ if (errcode == -pte_no_time)
+ fcr = 0ull;
+ else
+ return errcode;
+ }
+
+ /* We ignore configuration errors. They will result in imprecise
+ * timing and are tracked as packet losses in struct pt_time.
+ */
+ errcode = pt_time_update_cyc(time, packet, config, fcr);
+ if (errcode < 0 && (errcode != -pte_bad_config))
+ return errcode;
+
+ return 0;
+}
+
+int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip)
+{
+ const uint8_t *pos, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ sync = decoder->sync;
+ pos = decoder->pos;
+ if (!pos)
+ pos = decoder->config.begin;
+
+ if (pos == sync)
+ pos += ptps_psb;
+
+ errcode = pt_sync_forward(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_qry_start(decoder, sync, ip);
+}
+
+int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip)
+{
+ const uint8_t *start, *sync;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ start = decoder->pos;
+ if (!start)
+ start = decoder->config.end;
+
+ sync = start;
+ for (;;) {
+ errcode = pt_sync_backward(&sync, sync, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_start(decoder, sync, ip);
+ if (errcode < 0) {
+ /* Ignore incomplete trace segments at the end. We need
+ * a full PSB+ to start decoding.
+ */
+ if (errcode == -pte_eos)
+ continue;
+
+ return errcode;
+ }
+
+ /* An empty trace segment in the middle of the trace might bring
+ * us back to where we started.
+ *
+		 * We're done once we have reached a new position.
+ */
+ if (decoder->pos != start)
+ break;
+ }
+
+ return 0;
+}
+
+int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip,
+ uint64_t offset)
+{
+ const uint8_t *sync, *pos;
+ int errcode;
+
+ if (!decoder)
+ return -pte_invalid;
+
+ pos = decoder->config.begin + offset;
+
+ errcode = pt_sync_set(&sync, pos, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ return pt_qry_start(decoder, sync, ip);
+}
+
+int pt_qry_get_offset(const struct pt_query_decoder *decoder, uint64_t *offset)
+{
+ const uint8_t *begin, *pos;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ pos = decoder->pos;
+
+ if (!pos)
+ return -pte_nosync;
+
+ *offset = pos - begin;
+ return 0;
+}
+
+int pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
+ uint64_t *offset)
+{
+ const uint8_t *begin, *sync;
+
+ if (!decoder || !offset)
+ return -pte_invalid;
+
+ begin = decoder->config.begin;
+ sync = decoder->sync;
+
+ if (!sync)
+ return -pte_nosync;
+
+ *offset = sync - begin;
+ return 0;
+}
+
+const struct pt_config *
+pt_qry_get_config(const struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return NULL;
+
+ return &decoder->config;
+}
+
+static int pt_qry_cache_tnt(struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return pt_qry_provoke_fetch_error(decoder);
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* There's an event ahead of us. */
+ if (pt_qry_will_event(decoder))
+ return -pte_bad_query;
+
+ /* Diagnose a TIP that has not been part of an event. */
+ if (dfun->flags & pdff_tip)
+ return -pte_bad_query;
+
+ /* Clear the decoder's current event so we know when we
+ * accidentally skipped an event.
+ */
+ decoder->event = NULL;
+
+ /* Apply the decoder function. */
+ errcode = dfun->decode(decoder);
+ if (errcode)
+ return errcode;
+
+ /* If we skipped an event, we're in trouble. */
+ if (decoder->event)
+ return -pte_event_ignored;
+
+ /* We're done when we decoded a TNT packet. */
+ if (dfun->flags & pdff_tnt)
+ break;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode)
+ return errcode;
+ }
+
+ /* Preserve the time at the TNT packet. */
+ decoder->last_time = decoder->time;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return errcode;
+
+ return 0;
+}
+
+int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken)
+{
+ int errcode, query;
+
+ if (!decoder || !taken)
+ return -pte_invalid;
+
+	/* We cache the latest TNT packet in the decoder. Let's re-fill the
+ * cache in case it is empty.
+ */
+ if (pt_tnt_cache_is_empty(&decoder->tnt)) {
+ errcode = pt_qry_cache_tnt(decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ query = pt_tnt_cache_query(&decoder->tnt);
+ if (query < 0)
+ return query;
+
+ *taken = query;
+
+ return pt_qry_status_flags(decoder);
+}
+
+int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr)
+{
+ int errcode, flags;
+
+ if (!decoder || !addr)
+ return -pte_invalid;
+
+ flags = 0;
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return pt_qry_provoke_fetch_error(decoder);
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* There's an event ahead of us. */
+ if (pt_qry_will_event(decoder))
+ return -pte_bad_query;
+
+ /* Clear the decoder's current event so we know when we
+ * accidentally skipped an event.
+ */
+ decoder->event = NULL;
+
+		/* We may see a single TNT packet if the current TNT cache is
+		 * empty.
+		 *
+		 * If we see a TNT while the TNT cache is not empty, it means
+ * that our user got out of sync. Let's report no data and hope
+ * that our user is able to re-sync.
+ */
+ if ((dfun->flags & pdff_tnt) &&
+ !pt_tnt_cache_is_empty(&decoder->tnt))
+ return -pte_bad_query;
+
+ /* Apply the decoder function. */
+ errcode = dfun->decode(decoder);
+ if (errcode)
+ return errcode;
+
+ /* If we skipped an event, we're in trouble. */
+ if (decoder->event)
+ return -pte_event_ignored;
+
+ /* We're done when we found a TIP packet that isn't part of an
+ * event.
+ */
+ if (dfun->flags & pdff_tip) {
+ uint64_t ip;
+
+ /* We already decoded it, so the branch destination
+ * is stored in the decoder's last ip.
+ */
+ errcode = pt_last_ip_query(&ip, &decoder->ip);
+ if (errcode < 0)
+ flags |= pts_ip_suppressed;
+ else
+ *addr = ip;
+
+ break;
+ }
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode)
+ return errcode;
+ }
+
+ /* Preserve the time at the TIP packet. */
+ decoder->last_time = decoder->time;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return errcode;
+
+ flags |= pt_qry_status_flags(decoder);
+
+ return flags;
+}
+
+int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event,
+ size_t size)
+{
+ int errcode, flags;
+
+ if (!decoder || !event)
+ return -pte_invalid;
+
+ if (size < offsetof(struct pt_event, variant))
+ return -pte_invalid;
+
+ /* We do not allow querying for events while there are still TNT
+ * bits to consume.
+ */
+ if (!pt_tnt_cache_is_empty(&decoder->tnt))
+ return -pte_bad_query;
+
+ /* Do not provide more than we actually have. */
+ if (sizeof(*event) < size)
+ size = sizeof(*event);
+
+ flags = 0;
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return pt_qry_provoke_fetch_error(decoder);
+
+ if (!dfun->decode)
+ return -pte_internal;
+
+ /* We must not see a TIP or TNT packet unless it belongs
+ * to an event.
+ *
+ * If we see one, it means that our user got out of sync.
+ * Let's report no data and hope that our user is able
+ * to re-sync.
+ */
+ if ((dfun->flags & (pdff_tip | pdff_tnt)) &&
+ !pt_qry_will_event(decoder))
+ return -pte_bad_query;
+
+ /* Clear the decoder's current event so we know when decoding
+ * produces a new event.
+ */
+ decoder->event = NULL;
+
+ /* Apply any other decoder function. */
+ errcode = dfun->decode(decoder);
+ if (errcode)
+ return errcode;
+
+ /* Check if there has been an event.
+ *
+ * Some packets may result in events in some but not in all
+ * configurations.
+ */
+ if (decoder->event) {
+ (void) memcpy(event, decoder->event, size);
+ break;
+ }
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if (errcode)
+ return errcode;
+ }
+
+ /* Preserve the time at the event. */
+ decoder->last_time = decoder->time;
+
+ /* Read ahead until the next query-relevant packet. */
+ errcode = pt_qry_read_ahead(decoder);
+ if ((errcode < 0) && (errcode != -pte_eos))
+ return errcode;
+
+ flags |= pt_qry_status_flags(decoder);
+
+ return flags;
+}
+
+int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time,
+ uint32_t *lost_mtc, uint32_t *lost_cyc)
+{
+ if (!decoder || !time)
+ return -pte_invalid;
+
+ return pt_time_query_tsc(time, lost_mtc, lost_cyc, &decoder->last_time);
+}
+
+int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr)
+{
+ if (!decoder || !cbr)
+ return -pte_invalid;
+
+ return pt_time_query_cbr(cbr, &decoder->last_time);
+}
+
+static int pt_qry_event_time(struct pt_event *event,
+ const struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!event || !decoder)
+ return -pte_internal;
+
+ errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc,
+ &event->lost_cyc, &decoder->time);
+ if (errcode < 0) {
+ if (errcode != -pte_no_time)
+ return errcode;
+ } else
+ event->has_tsc = 1;
+
+ return 0;
+}
+
+int pt_qry_decode_unknown(struct pt_query_decoder *decoder)
+{
+ struct pt_packet packet;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_pad(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += ptps_pad;
+
+ return 0;
+}
+
+static int pt_qry_read_psb_header(struct pt_query_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ pt_last_ip_init(&decoder->ip);
+
+ for (;;) {
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(&decoder->next, decoder->pos,
+ &decoder->config);
+ if (errcode)
+ return errcode;
+
+ dfun = decoder->next;
+ if (!dfun)
+ return -pte_internal;
+
+		/* We're done once we reach a psbend packet. */
+ if (dfun->flags & pdff_psbend)
+ return 0;
+
+ if (!dfun->header)
+ return -pte_bad_context;
+
+ errcode = dfun->header(decoder);
+ if (errcode)
+ return errcode;
+ }
+}
+
+int pt_qry_decode_psb(struct pt_query_decoder *decoder)
+{
+ const uint8_t *pos;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ pos = decoder->pos;
+
+ size = pt_pkt_read_psb(pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ decoder->pos += size;
+
+ errcode = pt_qry_read_psb_header(decoder);
+ if (errcode < 0) {
+ /* Move back to the PSB so we have a chance to recover and
+ * continue decoding.
+ */
+ decoder->pos = pos;
+
+ /* Clear any PSB+ events that have already been queued. */
+ (void) pt_evq_clear(&decoder->evq, evb_psbend);
+
+ /* Reset the decoder's decode function. */
+ decoder->next = &pt_decode_psb;
+
+ return errcode;
+ }
+
+	/* The packet following the PSB header will be of type PSBEND.
+	 *
+	 * Decoding this packet will publish the PSB events that have been
+ * accumulated while reading the PSB header.
+ */
+ return 0;
+}
+
+static int pt_qry_event_ip(uint64_t *ip, struct pt_event *event,
+ const struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+	if (!event || !decoder)
+ return -pte_internal;
+
+ errcode = pt_last_ip_query(ip, &decoder->ip);
+ if (errcode < 0) {
+ switch (pt_errcode(errcode)) {
+ case pte_noip:
+ case pte_ip_suppressed:
+ event->ip_suppressed = 1;
+ break;
+
+ default:
+ return errcode;
+ }
+ }
+
+ return 0;
+}
+
+/* Decode a generic IP packet.
+ *
+ * Returns the number of bytes read, on success.
+ * Returns -pte_eos if the ip does not fit into the buffer.
+ * Returns -pte_bad_packet if the ip compression is not known.
+ */
+static int pt_qry_decode_ip(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_ip packet;
+ int errcode, size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+	/* We do not update the decoder's position yet. */
+
+ return size;
+}
+
+static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_tip(struct pt_event *ev,
+ struct pt_query_decoder *decoder)
+{
+ if (!ev || !decoder)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_async_branch:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.async_branch.to, ev,
+ decoder);
+
+ case ptev_async_paging:
+ return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
+ decoder);
+
+ case ptev_async_vmcs:
+ return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
+ decoder);
+
+ case ptev_exec_mode:
+ return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev,
+ decoder);
+
+ default:
+ break;
+ }
+
+ return -pte_bad_context;
+}
+
+int pt_qry_decode_tip(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* Process any pending events binding to TIP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_tip);
+ if (ev) {
+ errcode = pt_qry_event_tip(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_tip))
+ return 0;
+
+ /* No further events.
+ *
+ * If none of the events consumed the packet, we're done.
+ */
+ if (!decoder->consume_packet)
+ return 0;
+
+ /* We're done with this packet. Clear the flag we set previously
+ * and consume it.
+ */
+ decoder->consume_packet = 0;
+ }
+
+ return pt_qry_consume_tip(decoder, size);
+}
+
+int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tnt packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tnt packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_tip_pge(struct pt_event *ev,
+ const struct pt_query_decoder *decoder)
+{
+ if (!ev)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_exec_mode:
+ return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
+
+ default:
+ break;
+ }
+
+ return -pte_bad_context;
+}
+
+int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* We send the enable event first. This is more convenient for our users
+ * and does not require them to either store or blindly apply other
+ * events that might be pending.
+ *
+ * We use the consume packet decoder flag to indicate this.
+ */
+ if (!decoder->consume_packet) {
+ /* This packet signals a standalone enabled event. */
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_enabled;
+
+ /* We can't afford having a suppressed IP here. */
+ errcode = pt_last_ip_query(&ev->variant.enabled.ip,
+ &decoder->ip);
+ if (errcode < 0)
+ return -pte_bad_packet;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Discard any cached TNT bits.
+ *
+ * They should have been consumed at the corresponding disable
+ * event. If they have not, for whatever reason, discard them
+ * now so our user does not get out of sync.
+ */
+ pt_tnt_cache_init(&decoder->tnt);
+
+ /* Process pending events next. */
+ decoder->consume_packet = 1;
+ decoder->enabled = 1;
+ } else {
+ /* Process any pending events binding to TIP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_tip);
+ if (ev) {
+ errcode = pt_qry_event_tip_pge(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+ }
+
+ /* We must have an event. Either the initial enable event or one of the
+ * queued events.
+ */
+ if (!ev)
+ return -pte_internal;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_tip))
+ return 0;
+
+ /* We must consume the packet. */
+ if (!decoder->consume_packet)
+ return -pte_internal;
+
+ decoder->consume_packet = 0;
+
+ return pt_qry_consume_tip_pge(decoder, size);
+}
+
+static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->enabled = 0;
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_tip_pgd(struct pt_event *ev,
+ const struct pt_query_decoder *decoder)
+{
+ if (!ev)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_async_branch: {
+ uint64_t at;
+
+ /* Turn the async branch into an async disable. */
+ at = ev->variant.async_branch.from;
+
+ ev->type = ptev_async_disabled;
+ ev->variant.async_disabled.at = at;
+
+ return pt_qry_event_ip(&ev->variant.async_disabled.ip, ev,
+ decoder);
+ }
+
+ case ptev_async_paging:
+ case ptev_async_vmcs:
+ case ptev_exec_mode:
+ /* These events are ordered after the async disable event. It
+ * is not quite clear what IP to give them.
+ *
+ * If we give them the async disable's source IP, we'd make an
+ * error if the IP is updated when applying the async disable
+ * event.
+ *
+ * If we give them the async disable's destination IP, we'd make
+ * an error if the IP is not updated when applying the async
+ * disable event. That's what our decoders do since tracing is
+ * likely to resume from there.
+ *
+ * In all cases, tracing will be disabled when those events are
+ * applied, so we may as well suppress the IP.
+ */
+ ev->ip_suppressed = 1;
+
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -pte_bad_context;
+}
+
+int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* Process any pending events binding to TIP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_tip);
+ if (ev) {
+ errcode = pt_qry_event_tip_pgd(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ } else {
+ /* This packet signals a standalone disabled event. */
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+ ev->type = ptev_disabled;
+
+ errcode = pt_qry_event_ip(&ev->variant.disabled.ip, ev,
+ decoder);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+	/* We must have an event. Either the standalone disabled event or one
+	 * of the queued events.
+ */
+ if (!ev)
+ return -pte_internal;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_tip))
+ return 0;
+
+ return pt_qry_consume_tip_pgd(decoder, size);
+}
+
+static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder)
+{
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0) {
+ /* Running out of packets is not an error. */
+ if (errcode == -pte_eos)
+ errcode = 0;
+
+ return errcode;
+ }
+
+ switch (packet.type) {
+ default:
+ /* All other packets cancel our search.
+ *
+			 * We do not enumerate them explicitly so that any
+			 * future packet types cancel the search as well.
+ */
+ return 0;
+
+ case ppt_tip_pge:
+ /* We found it - the erratum applies. */
+ return 1;
+
+ case ppt_pad:
+ case ppt_tsc:
+ case ppt_cbr:
+ case ppt_psbend:
+ case ppt_pip:
+ case ppt_mode:
+ case ppt_vmcs:
+ case ppt_tma:
+ case ppt_mtc:
+ case ppt_cyc:
+ case ppt_mnt:
+ /* Intentionally skip a few packets. */
+ continue;
+ }
+ }
+}
+
+static int check_erratum_bdm70(const uint8_t *pos,
+ const struct pt_config *config)
+{
+ struct pt_packet_decoder decoder;
+ int errcode;
+
+ if (!pos || !config)
+ return -pte_internal;
+
+ errcode = pt_pkt_decoder_init(&decoder, config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin));
+ if (errcode >= 0)
+ errcode = scan_for_erratum_bdm70(&decoder);
+
+ pt_pkt_decoder_fini(&decoder);
+ return errcode;
+}
+
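+/* Process a FUP packet inside PSB+.
+ *
+ * The FUP gives the current IP; tracing is enabled if the IP is not
+ * suppressed.  If erratum BDM70 applies, the FUP is ignored and tracing
+ * remains disabled.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */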
+int pt_qry_header_fup(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_ip packet;
+ int errcode, size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ if (decoder->config.errata.bdm70 && !decoder->enabled) {
+ errcode = check_erratum_bdm70(decoder->pos + size,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ if (errcode)
+ return pt_qry_consume_fup(decoder, size);
+ }
+
+ errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* Tracing is enabled if we have an IP in the header. */
+ if (packet.ipc != pt_ipc_suppressed)
+ decoder->enabled = 1;
+
+ return pt_qry_consume_fup(decoder, size);
+}
+
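+/* Bind a pending event to the current FUP packet.
+ *
+ * Fills in the event's IP from the FUP and sets @decoder->consume_packet
+ * if the event consumes the FUP.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */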
+static int pt_qry_event_fup(struct pt_event *ev,
+ struct pt_query_decoder *decoder)
+{
+ if (!ev || !decoder)
+ return -pte_internal;
+
+ switch (ev->type) {
+ case ptev_overflow:
+ decoder->consume_packet = 1;
+
+ /* We can't afford having a suppressed IP here. */
+ return pt_last_ip_query(&ev->variant.overflow.ip,
+ &decoder->ip);
+
+ case ptev_tsx:
+ if (!(ev->variant.tsx.aborted))
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
+
+ case ptev_exstop:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.exstop.ip, ev, decoder);
+
+ case ptev_mwait:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.mwait.ip, ev, decoder);
+
+ case ptev_ptwrite:
+ decoder->consume_packet = 1;
+
+ return pt_qry_event_ip(&ev->variant.ptwrite.ip, ev, decoder);
+
+ default:
+ break;
+ }
+
+ return -pte_internal;
+}
+
+int pt_qry_decode_fup(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_qry_decode_ip(decoder);
+ if (size < 0)
+ return size;
+
+ /* Process any pending events binding to FUP. */
+ ev = pt_evq_dequeue(&decoder->evq, evb_fup);
+ if (ev) {
+ errcode = pt_qry_event_fup(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Process further pending events. */
+ if (pt_evq_pending(&decoder->evq, evb_fup))
+ return 0;
+
+ /* No further events.
+ *
+ * If none of the events consumed the packet, we're done.
+ */
+ if (!decoder->consume_packet)
+ return 0;
+
+ /* We're done with this packet. Clear the flag we set previously
+ * and consume it.
+ */
+ decoder->consume_packet = 0;
+ } else {
+ /* FUP indicates an async branch event; it binds to TIP.
+ *
+ * We do need an IP in this case.
+ */
+ uint64_t ip;
+
+ errcode = pt_last_ip_query(&ip, &decoder->ip);
+ if (errcode < 0)
+ return errcode;
+
+ ev = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!ev)
+ return -pte_nomem;
+
+ ev->type = ptev_async_branch;
+ ev->variant.async_branch.from = ip;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return pt_qry_consume_fup(decoder, size);
+}
+
+int pt_qry_decode_pip(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pip packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* Paging events are either standalone or bind to the same TIP packet
+ * as an in-flight async branch event.
+ */
+ event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
+ if (!event) {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+ event->type = ptev_paging;
+ event->variant.paging.cr3 = packet.cr3;
+ event->variant.paging.non_root = packet.nr;
+
+ decoder->event = event;
+ } else {
+ event = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_paging;
+ event->variant.async_paging.cr3 = packet.cr3;
+ event->variant.async_paging.non_root = packet.nr;
+ }
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_pip(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pip packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* Paging events are reported at the end of the PSB. */
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_paging;
+ event->variant.async_paging.cr3 = packet.cr3;
+ event->variant.async_paging.non_root = packet.nr;
+
+ decoder->pos += size;
+ return 0;
+}
+
+static int pt_qry_event_psbend(struct pt_event *ev,
+ struct pt_query_decoder *decoder)
+{
+ int errcode;
+
+ if (!ev || !decoder)
+ return -pte_internal;
+
+ /* PSB+ events are status updates. */
+ ev->status_update = 1;
+
+ errcode = pt_qry_event_time(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ switch (ev->type) {
+ case ptev_async_paging:
+ return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
+ decoder);
+
+ case ptev_exec_mode:
+ return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
+
+ case ptev_tsx:
+ return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
+
+ case ptev_async_vmcs:
+ return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
+ decoder);
+
+ case ptev_cbr:
+ return 0;
+
+ case ptev_mnt:
+ /* Maintenance packets may appear anywhere. Do not mark them as
+ * status updates even if they appear in PSB+.
+ */
+ ev->status_update = 0;
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -pte_internal;
+}
+
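+/* Dequeue and publish the next pending PSB+ event, if any.
+ *
+ * Returns a positive integer if an event was published.
+ * Returns zero if no event was pending.
+ * Returns a negative pt_error_code otherwise.
+ */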
+static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = pt_evq_dequeue(&decoder->evq, evb_psbend);
+ if (!ev)
+ return 0;
+
+ errcode = pt_qry_event_psbend(ev, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ /* Signal a pending event. */
+ return 1;
+}
+
+/* Create a standalone overflow event with tracing disabled.
+ *
+ * Creates and publishes the event and disables tracing in @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_qry_event_ovf_disabled(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_overflow;
+
+ /* We suppress the IP to indicate that tracing has been disabled before
+ * the overflow resolved. There can be several events before tracing is
+ * enabled again.
+ */
+ ev->ip_suppressed = 1;
+
+ decoder->enabled = 0;
+ decoder->event = ev;
+
+ return pt_qry_event_time(ev, decoder);
+}
+
+/* Queues an overflow event with tracing enabled.
+ *
+ * Creates and enqueues the event and enables tracing in @decoder.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int pt_qry_event_ovf_enabled(struct pt_query_decoder *decoder)
+{
+ struct pt_event *ev;
+
+ if (!decoder)
+ return -pte_internal;
+
+ ev = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_overflow;
+
+ decoder->enabled = 1;
+
+ return pt_qry_event_time(ev, decoder);
+}
+
+/* Recover from SKD010.
+ *
+ * Creates and publishes an overflow event at @packet's IP payload.
+ *
+ * Further updates @decoder as follows:
+ *
+ * - set time tracking to @time and @tcal
+ * - set the position to @offset
+ * - set ip to @packet's IP payload
+ * - set tracing to be enabled
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int skd010_recover(struct pt_query_decoder *decoder,
+ const struct pt_packet_ip *packet,
+ const struct pt_time_cal *tcal,
+ const struct pt_time *time, uint64_t offset)
+{
+ struct pt_last_ip ip;
+ struct pt_event *ev;
+ int errcode;
+
+ if (!decoder || !packet || !tcal || !time)
+ return -pte_internal;
+
+ /* We use the decoder's IP. It should be newly initialized. */
+ ip = decoder->ip;
+
+ /* Extract the IP payload from the packet. */
+ errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ /* Synthesize the overflow event. */
+ ev = pt_evq_standalone(&decoder->evq);
+ if (!ev)
+ return -pte_internal;
+
+ ev->type = ptev_overflow;
+
+ /* We do need a full IP. */
+ errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip);
+ if (errcode < 0)
+ return -pte_bad_context;
+
+ /* We continue decoding at the given offset. */
+ decoder->pos = decoder->config.begin + offset;
+
+ /* Tracing is enabled. */
+ decoder->enabled = 1;
+ decoder->ip = ip;
+
+ decoder->time = *time;
+ decoder->tcal = *tcal;
+
+ /* Publish the event. */
+ decoder->event = ev;
+
+ return pt_qry_event_time(ev, decoder);
+}
+
+/* Recover from SKD010 with tracing disabled.
+ *
+ * Creates and publishes a standalone overflow event.
+ *
+ * Further updates @decoder as follows:
+ *
+ * - set time tracking to @time and @tcal
+ * - set the position to @offset
+ * - set tracing to be disabled
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int skd010_recover_disabled(struct pt_query_decoder *decoder,
+ const struct pt_time_cal *tcal,
+ const struct pt_time *time, uint64_t offset)
+{
+ if (!decoder || !tcal || !time)
+ return -pte_internal;
+
+ decoder->time = *time;
+ decoder->tcal = *tcal;
+
+ /* We continue decoding at the given offset. */
+ decoder->pos = decoder->config.begin + offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+}
+
+/* Scan ahead for a packet at which to resume after an overflow.
+ *
+ * This function is called after an OVF without a corresponding FUP. This
+ * normally means that the overflow resolved while tracing was disabled.
+ *
+ * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped.
+ * The overflow thus resolved while tracing was enabled (or tracing was enabled
+ * after the overflow resolved). Search for an indication whether tracing is
+ * enabled or disabled by scanning upcoming packets.
+ *
+ * If we can confirm that tracing is disabled, the erratum does not apply and we
+ * can continue normally.
+ *
+ * If we can confirm that tracing is enabled, the erratum applies and we try to
+ * recover by synchronizing at a later packet and a different IP. If we can't
+ * recover, pretend the erratum didn't apply so we run into the error later.
+ * Since this assumes that tracing is disabled, no harm should be done, i.e. no
+ * bad trace should be generated.
+ *
+ * Returns zero if the overflow is handled.
+ * Returns a positive value if the overflow is not yet handled.
+ * Returns a negative error code otherwise.
+ */
+static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt,
+ struct pt_query_decoder *decoder)
+{
+ struct pt_time_cal tcal;
+ struct pt_time time;
+ struct {
+ struct pt_time_cal tcal;
+ struct pt_time time;
+ uint64_t offset;
+ } mode_tsx;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Keep track of time as we skip packets. */
+ time = decoder->time;
+ tcal = decoder->tcal;
+
+ /* Keep track of a potential recovery point at MODE.TSX. */
+ memset(&mode_tsx, 0, sizeof(mode_tsx));
+
+ for (;;) {
+ struct pt_packet packet;
+ uint64_t offset;
+
+ errcode = pt_pkt_get_offset(pkt, &offset);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
+ if (errcode < 0) {
+ /* Let's assume the trace is correct if we run out
+ * of packets.
+ */
+ if (errcode == -pte_eos)
+ errcode = 1;
+
+ return errcode;
+ }
+
+ switch (packet.type) {
+ case ppt_tip_pge:
+ /* Everything is fine. There is nothing to do. */
+ return 1;
+
+ case ppt_tip_pgd:
+ /* This is a clear indication that the erratum
+ * applies.
+ *
+ * We synchronize after the disable.
+ */
+ return skd010_recover_disabled(decoder, &tcal, &time,
+ offset + packet.size);
+
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ /* This is a clear indication that the erratum
+			 * applies.
+ *
+ * Yet, we can't recover from it as we wouldn't know how
+ * many TNT bits will have been used when we eventually
+ * find an IP packet at which to resume tracing.
+ */
+ return 1;
+
+ case ppt_pip:
+ case ppt_vmcs:
+ /* We could track those changes and synthesize extra
+ * events after the overflow event when recovering from
+ * the erratum. This requires infrastructure that we
+ * don't currently have, though, so we're not going to
+ * do it.
+ *
+ * Instead, we ignore those changes. We already don't
+ * know how many other changes were lost in the
+ * overflow.
+ */
+ break;
+
+ case ppt_mode:
+ switch (packet.payload.mode.leaf) {
+ case pt_mol_exec:
+ /* A MODE.EXEC packet binds to TIP, i.e.
+ *
+ * TIP.PGE: everything is fine
+ * TIP: the erratum applies
+ *
+ * In the TIP.PGE case, we may just follow the
+ * normal code flow.
+ *
+ * In the TIP case, we'd be able to re-sync at
+ * the TIP IP but have to skip packets up to and
+ * including the TIP.
+ *
+ * We'd need to synthesize the MODE.EXEC event
+ * after the overflow event when recovering at
+ * the TIP. We lack the infrastructure for this
+ * - it's getting too complicated.
+ *
+ * Instead, we ignore the execution mode change;
+ * we already don't know how many more such
+ * changes were lost in the overflow.
+ */
+ break;
+
+ case pt_mol_tsx:
+ /* A MODE.TSX packet may be standalone or bind
+ * to FUP.
+ *
+ * If this is the second MODE.TSX, we're sure
+ * that tracing is disabled and everything is
+ * fine.
+ */
+ if (mode_tsx.offset)
+ return 1;
+
+ /* If we find the FUP this packet binds to, we
+ * may recover at the FUP IP and restart
+ * processing packets from here. Remember the
+ * current state.
+ */
+ mode_tsx.offset = offset;
+ mode_tsx.time = time;
+ mode_tsx.tcal = tcal;
+
+ break;
+ }
+
+ break;
+
+ case ppt_fup:
+ /* This is a pretty good indication that tracing
+ * is indeed enabled and the erratum applies.
+ */
+
+ /* If we got a MODE.TSX packet before, we synchronize at
+ * the FUP IP but continue decoding packets starting
+ * from the MODE.TSX.
+ */
+ if (mode_tsx.offset)
+ return skd010_recover(decoder,
+ &packet.payload.ip,
+ &mode_tsx.tcal,
+ &mode_tsx.time,
+ mode_tsx.offset);
+
+ /* Without a preceding MODE.TSX, this FUP is the start
+ * of an async branch or disable. We synchronize at the
+ * FUP IP and continue decoding packets from here.
+ */
+ return skd010_recover(decoder, &packet.payload.ip,
+ &tcal, &time, offset);
+
+ case ppt_tip:
+			/* We synchronize at the TIP IP and continue decoding
+ * packets after the TIP packet.
+ */
+ return skd010_recover(decoder, &packet.payload.ip,
+ &tcal, &time,
+ offset + packet.size);
+
+ case ppt_psb:
+ /* We reached a synchronization point. Tracing is
+ * enabled if and only if the PSB+ contains a FUP.
+ */
+ errcode = pt_qry_find_header_fup(&packet, pkt);
+ if (errcode < 0) {
+ /* If we ran out of packets, we can't tell.
+ * Let's assume the trace is correct.
+ */
+ if (errcode == -pte_eos)
+ errcode = 1;
+
+ return errcode;
+ }
+
+ /* If there is no FUP, tracing is disabled and
+ * everything is fine.
+ */
+ if (!errcode)
+ return 1;
+
+ /* We should have a FUP. */
+ if (packet.type != ppt_fup)
+ return -pte_internal;
+
+ /* Otherwise, we may synchronize at the FUP IP and
+ * continue decoding packets at the PSB.
+ */
+ return skd010_recover(decoder, &packet.payload.ip,
+ &tcal, &time, offset);
+
+ case ppt_psbend:
+ /* We shouldn't see this. */
+ return -pte_bad_context;
+
+ case ppt_ovf:
+ case ppt_stop:
+ /* It doesn't matter if it had been enabled or disabled
+ * before. We may resume normally.
+ */
+ return 1;
+
+ case ppt_unknown:
+ case ppt_invalid:
+ /* We can't skip this packet. */
+ return 1;
+
+ case ppt_pad:
+ case ppt_mnt:
+ case ppt_pwre:
+ case ppt_pwrx:
+ /* Ignore this packet. */
+ break;
+
+ case ppt_exstop:
+ /* We may skip a stand-alone EXSTOP. */
+ if (!packet.payload.exstop.ip)
+ break;
+
+ fallthrough;
+ case ppt_mwait:
+ /* To skip this packet, we'd need to take care of the
+ * FUP it binds to. This is getting complicated.
+ */
+ return 1;
+
+ case ppt_ptw:
+ /* We may skip a stand-alone PTW. */
+ if (!packet.payload.ptw.ip)
+ break;
+
+ /* To skip this packet, we'd need to take care of the
+ * FUP it binds to. This is getting complicated.
+ */
+ return 1;
+
+ case ppt_tsc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tsc(&time, &tcal,
+ &packet.payload.tsc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cbr:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cbr(&time, &tcal,
+ &packet.payload.cbr,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_tma:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tma(&time, &tcal,
+ &packet.payload.tma,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_mtc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_mtc(&time, &tcal,
+ &packet.payload.mtc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cyc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cyc(&time, &tcal,
+ &packet.payload.cyc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+ }
+ }
+}
+
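+/* Handle erratum SKD010.
+ *
+ * Scans the packets following the current position for an indication
+ * whether tracing is enabled or disabled and, if the erratum applies,
+ * recovers at a later packet and a different IP.
+ *
+ * Returns zero if the overflow is handled.
+ * Returns a positive value if the overflow is not yet handled.
+ * Returns a negative error code otherwise.
+ */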
+static int pt_qry_handle_skd010(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t offset;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_qry_get_offset(decoder, &offset);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_sync_set(&pkt, offset);
+ if (errcode >= 0)
+ errcode = skd010_scan_for_ovf_resume(&pkt, decoder);
+
+ pt_pkt_decoder_fini(&pkt);
+ return errcode;
+}
+
+/* Scan ahead for an indication whether tracing is enabled or disabled.
+ *
+ * Returns zero if tracing is clearly disabled.
+ * Returns a positive integer if tracing is enabled or if we can't tell.
+ * Returns a negative error code otherwise.
+ */
+static int apl12_tracing_is_disabled(struct pt_packet_decoder *decoder)
+{
+ if (!decoder)
+ return -pte_internal;
+
+ for (;;) {
+ struct pt_packet packet;
+ int status;
+
+ status = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (status < 0) {
+ /* Running out of packets is not an error. */
+ if (status == -pte_eos)
+ status = 1;
+
+ return status;
+ }
+
+ switch (packet.type) {
+ default:
+ /* Skip other packets. */
+ break;
+
+ case ppt_stop:
+ /* Tracing is disabled before a stop. */
+ return 0;
+
+ case ppt_tip_pge:
+ /* Tracing gets enabled - it must have been disabled. */
+ return 0;
+
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ case ppt_tip:
+ case ppt_tip_pgd:
+ /* Those packets are only generated when tracing is
+ * enabled. We're done.
+ */
+ return 1;
+
+ case ppt_psb:
+ /* We reached a synchronization point. Tracing is
+ * enabled if and only if the PSB+ contains a FUP.
+ */
+ status = pt_qry_find_header_fup(&packet, decoder);
+
+ /* If we ran out of packets, we can't tell. */
+ if (status == -pte_eos)
+ status = 1;
+
+ return status;
+
+ case ppt_psbend:
+ /* We shouldn't see this. */
+ return -pte_bad_context;
+
+ case ppt_ovf:
+ /* It doesn't matter - we run into the next overflow. */
+ return 1;
+
+ case ppt_unknown:
+ case ppt_invalid:
+ /* We can't skip this packet. */
+ return 1;
+ }
+ }
+}
+
+/* Apply workaround for erratum APL12.
+ *
+ * We resume from @offset (relative to @decoder->pos) with tracing disabled. On
+ * our way to the resume location we process packets to update our state.
+ *
+ * Any event will be dropped.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */
+static int apl12_resume_disabled(struct pt_query_decoder *decoder,
+ struct pt_packet_decoder *pkt,
+ unsigned int offset)
+{
+ uint64_t begin, end;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ errcode = pt_qry_get_offset(decoder, &begin);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_pkt_sync_set(pkt, begin);
+ if (errcode < 0)
+ return errcode;
+
+ end = begin + offset;
+ for (;;) {
+ struct pt_packet packet;
+ uint64_t next;
+
+ errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
+ if (errcode < 0) {
+ /* Running out of packets is not an error. */
+ if (errcode == -pte_eos)
+ errcode = 0;
+
+ return errcode;
+ }
+
+ /* The offset is the start of the next packet. */
+ errcode = pt_pkt_get_offset(pkt, &next);
+ if (errcode < 0)
+ return errcode;
+
+ /* We're done when we reach @offset.
+ *
+ * The current @packet will be the FUP after which we started
+ * our search. We skip it.
+ *
+ * Check that we're not accidentally proceeding past @offset.
+ */
+ if (end <= next) {
+ if (end < next)
+ return -pte_internal;
+
+ break;
+ }
+
+ switch (packet.type) {
+ default:
+ /* Skip other packets. */
+ break;
+
+ case ppt_mode:
+ case ppt_pip:
+ case ppt_vmcs:
+			/* We should not encounter these.
+			 *
+			 * There are many packets we should not encounter
+			 * here, but these are state-relevant; let's check
+			 * for them explicitly.
+			 */
+ return -pte_internal;
+
+ case ppt_tsc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tsc(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.tsc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cbr:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cbr(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.cbr,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_tma:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tma(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.tma,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_mtc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_mtc(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.mtc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cyc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cyc(&decoder->time,
+ &decoder->tcal,
+ &packet.payload.cyc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+ }
+ }
+
+ decoder->pos += offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+}
+
+/* Handle erratum APL12.
+ *
+ * This function is called when a FUP is found after an OVF. The @offset
+ * argument gives the relative offset from @decoder->pos to after the FUP.
+ *
+ * A FUP after OVF normally indicates that the overflow resolved while tracing
+ * is enabled. Due to erratum APL12, however, the overflow may have resolved
+ * while tracing is disabled and still generate a FUP.
+ *
+ * We scan ahead for an indication whether tracing is actually disabled. If we
+ * find one, the erratum applies and we proceed from after the FUP packet.
+ *
+ * This will drop any CBR or MTC events. We will update @decoder's timing state
+ * on CBR but drop the event.
+ *
+ * Returns zero if the erratum was handled.
+ * Returns a positive integer if the erratum was not handled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_qry_handle_apl12(struct pt_query_decoder *decoder,
+ unsigned int offset)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t here;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_get_offset(decoder, &here);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_sync_set(&pkt, here + offset);
+ if (status >= 0) {
+ status = apl12_tracing_is_disabled(&pkt);
+ if (!status)
+ status = apl12_resume_disabled(decoder, &pkt, offset);
+ }
+
+ pt_pkt_decoder_fini(&pkt);
+ return status;
+}
+
+/* Apply workaround for erratum APL11.
+ *
+ * We search for a TIP.PGD and, if we find one, resume from after that packet
+ * with tracing disabled. On our way to the resume location we process packets
+ * to update our state.
+ *
+ * If we don't find a TIP.PGD but instead some other packet that indicates that
+ * tracing is disabled, indicate that the erratum does not apply.
+ *
+ * Any event will be dropped.
+ *
+ * Returns zero if the erratum was handled.
+ * Returns a positive integer if the erratum was not handled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int apl11_apply(struct pt_query_decoder *decoder,
+ struct pt_packet_decoder *pkt)
+{
+ struct pt_time_cal tcal;
+ struct pt_time time;
+
+ if (!decoder)
+ return -pte_internal;
+
+ time = decoder->time;
+ tcal = decoder->tcal;
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet.type) {
+ case ppt_tip_pgd: {
+ uint64_t offset;
+
+ /* We found a TIP.PGD. The erratum applies.
+ *
+ * Resume from here with tracing disabled.
+ */
+ errcode = pt_pkt_get_offset(pkt, &offset);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->time = time;
+ decoder->tcal = tcal;
+ decoder->pos = decoder->config.begin + offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+ }
+
+ case ppt_invalid:
+ return -pte_bad_opc;
+
+ case ppt_fup:
+ case ppt_psb:
+ case ppt_tip_pge:
+ case ppt_stop:
+ case ppt_ovf:
+ case ppt_mode:
+ case ppt_pip:
+ case ppt_vmcs:
+ case ppt_exstop:
+ case ppt_mwait:
+ case ppt_pwre:
+ case ppt_pwrx:
+ case ppt_ptw:
+ /* The erratum does not apply. */
+ return 1;
+
+ case ppt_unknown:
+ case ppt_pad:
+ case ppt_mnt:
+ /* Skip those packets. */
+ break;
+
+ case ppt_psbend:
+ case ppt_tip:
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ return -pte_bad_context;
+
+ case ppt_tsc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tsc(&time, &tcal,
+ &packet.payload.tsc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cbr:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cbr(&time, &tcal,
+ &packet.payload.cbr,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_tma:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_tma(&time, &tcal,
+ &packet.payload.tma,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_mtc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_mtc(&time, &tcal,
+ &packet.payload.mtc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+
+ case ppt_cyc:
+ /* Keep track of time. */
+ errcode = pt_qry_apply_cyc(&time, &tcal,
+ &packet.payload.cyc,
+ &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ break;
+ }
+ }
+}
+
+/* Handle erratum APL11.
+ *
+ * This function is called when we diagnose a bad packet while searching for a
+ * FUP after an OVF.
+ *
+ * Due to erratum APL11 we may get an extra TIP.PGD after the OVF. Find that
+ * TIP.PGD and resume from there with tracing disabled.
+ *
+ * This will drop any CBR or MTC events. We will update @decoder's timing state
+ * on CBR but drop the event.
+ *
+ * Returns zero if the erratum was handled.
+ * Returns a positive integer if the erratum was not handled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_qry_handle_apl11(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t offset;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_get_offset(decoder, &offset);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_sync_set(&pkt, offset);
+ if (status >= 0)
+ status = apl11_apply(decoder, &pkt);
+
+ pt_pkt_decoder_fini(&pkt);
+ return status;
+}
+
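+/* Scan the packets following an OVF for a FUP to which it may bind.
+ *
+ * Returns a positive integer if a FUP was found.
+ * Returns zero if a packet indicating disabled tracing was found first.
+ * Returns -pte_bad_opc if the trace contains invalid packets.
+ * Returns -pte_bad_context if the trace contains packets that must not
+ * occur between OVF and FUP.
+ * Returns a negative pt_error_code otherwise.
+ */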
+static int pt_pkt_find_ovf_fup(struct pt_packet_decoder *decoder)
+{
+ for (;;) {
+ struct pt_packet packet;
+ int errcode;
+
+ errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
+ if (errcode < 0)
+ return errcode;
+
+ switch (packet.type) {
+ case ppt_fup:
+ return 1;
+
+ case ppt_invalid:
+ return -pte_bad_opc;
+
+ case ppt_unknown:
+ case ppt_pad:
+ case ppt_mnt:
+ case ppt_cbr:
+ case ppt_tsc:
+ case ppt_tma:
+ case ppt_mtc:
+ case ppt_cyc:
+ continue;
+
+ case ppt_psb:
+ case ppt_tip_pge:
+ case ppt_mode:
+ case ppt_pip:
+ case ppt_vmcs:
+ case ppt_stop:
+ case ppt_ovf:
+ case ppt_exstop:
+ case ppt_mwait:
+ case ppt_pwre:
+ case ppt_pwrx:
+ case ppt_ptw:
+ return 0;
+
+ case ppt_psbend:
+ case ppt_tip:
+ case ppt_tip_pgd:
+ case ppt_tnt_8:
+ case ppt_tnt_64:
+ return -pte_bad_context;
+ }
+ }
+}
+
+/* Find a FUP to which the current OVF may bind.
+ *
+ * Scans the trace for a FUP or for a packet that indicates that tracing is
+ * disabled.
+ *
+ * Returns the relative offset of the packet following the found FUP on
+ * success.
+ * Returns zero if no FUP is found and tracing is assumed to be disabled.
+ * Returns a negative pt_error_code otherwise.
+ */
+static int pt_qry_find_ovf_fup(const struct pt_query_decoder *decoder)
+{
+ struct pt_packet_decoder pkt;
+ uint64_t begin, end, offset;
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_get_offset(decoder, &begin);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_decoder_init(&pkt, &decoder->config);
+ if (status < 0)
+ return status;
+
+ status = pt_pkt_sync_set(&pkt, begin);
+ if (status >= 0) {
+ status = pt_pkt_find_ovf_fup(&pkt);
+ if (status > 0) {
+ status = pt_pkt_get_offset(&pkt, &end);
+ if (status < 0)
+ return status;
+
+ if (end <= begin)
+ return -pte_overflow;
+
+ offset = end - begin;
+ if (INT_MAX < offset)
+ return -pte_overflow;
+
+ status = (int) offset;
+ }
+ }
+
+ pt_pkt_decoder_fini(&pkt);
+ return status;
+}
+
+int pt_qry_decode_ovf(struct pt_query_decoder *decoder)
+{
+ struct pt_time time;
+ int status, offset;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_process_pending_psb_events(decoder);
+ if (status < 0)
+ return status;
+
+ /* If we have any pending psbend events, we're done for now. */
+ if (status)
+ return 0;
+
+ /* Reset the decoder state but preserve timing. */
+ time = decoder->time;
+ pt_qry_reset(decoder);
+ decoder->time = time;
+
+ /* We must consume the OVF before we search for the binding packet. */
+ decoder->pos += ptps_ovf;
+
+ /* Overflow binds to either FUP or TIP.PGE.
+ *
+ * If the overflow can be resolved while PacketEn=1 it binds to FUP. We
+ * can see timing packets between OVF and FUP but that's it.
+ *
+ * Otherwise, PacketEn will be zero when the overflow resolves and OVF
+ * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that
+ * do not depend on PacketEn.
+ *
+ * We don't need to decode everything until TIP.PGE, however. As soon
+ * as we see a non-timing non-FUP packet, we know that tracing has been
+ * disabled before the overflow resolves.
+ */
+ offset = pt_qry_find_ovf_fup(decoder);
+ if (offset <= 0) {
+ /* Check for erratum SKD010.
+ *
+ * The FUP may have been dropped. If we can figure out that
+ * tracing is enabled and hence the FUP is missing, we resume
+ * at a later packet and a different IP.
+ */
+ if (decoder->config.errata.skd010) {
+ status = pt_qry_handle_skd010(decoder);
+ if (status <= 0)
+ return status;
+ }
+
+ /* Check for erratum APL11.
+ *
+ * We may have gotten an extra TIP.PGD, which should be
+ * diagnosed by our search for a subsequent FUP.
+ */
+ if (decoder->config.errata.apl11 &&
+ (offset == -pte_bad_context)) {
+ status = pt_qry_handle_apl11(decoder);
+ if (status <= 0)
+ return status;
+ }
+
+ /* Report the original error from searching for the FUP packet
+ * if we were not able to fix the trace.
+ *
+ * We treat an overflow at the end of the trace as standalone.
+ */
+ if (offset < 0 && offset != -pte_eos)
+ return offset;
+
+ return pt_qry_event_ovf_disabled(decoder);
+ } else {
+ /* Check for erratum APL12.
+ *
+ * We may get an extra FUP even though the overflow resolved
+ * with tracing disabled.
+ */
+ if (decoder->config.errata.apl12) {
+ status = pt_qry_handle_apl12(decoder,
+ (unsigned int) offset);
+ if (status <= 0)
+ return status;
+ }
+
+ return pt_qry_event_ovf_enabled(decoder);
+ }
+}
+
+static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder,
+ const struct pt_packet_mode_exec *packet)
+{
+ struct pt_event *event;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ /* MODE.EXEC binds to TIP. */
+ event = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_exec_mode;
+ event->variant.exec_mode.mode = pt_get_exec_mode(packet);
+
+ return pt_qry_event_time(event, decoder);
+}
+
+static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder,
+ const struct pt_packet_mode_tsx *packet)
+{
+ struct pt_event *event;
+
+ if (!decoder || !packet)
+ return -pte_internal;
+
+ /* MODE.TSX is standalone if tracing is disabled. */
+ if (!decoder->enabled) {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ /* We don't have an IP in this case. */
+ event->variant.tsx.ip = 0;
+ event->ip_suppressed = 1;
+
+ /* Publish the event. */
+ decoder->event = event;
+ } else {
+ /* MODE.TSX binds to FUP. */
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_nomem;
+ }
+
+ event->type = ptev_tsx;
+ event->variant.tsx.speculative = packet->intx;
+ event->variant.tsx.aborted = packet->abrt;
+
+ return pt_qry_event_time(event, decoder);
+}
+
+int pt_qry_decode_mode(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mode packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = 0;
+ switch (packet.leaf) {
+ case pt_mol_exec:
+ errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec);
+ break;
+
+ case pt_mol_tsx:
+ errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx);
+ break;
+ }
+
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_mode(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mode packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* Inside the header, events are reported at the end. */
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ switch (packet.leaf) {
+ case pt_mol_exec:
+ event->type = ptev_exec_mode;
+ event->variant.exec_mode.mode =
+ pt_get_exec_mode(&packet.bits.exec);
+ break;
+
+ case pt_mol_tsx:
+ event->type = ptev_tsx;
+ event->variant.tsx.speculative = packet.bits.tsx.intx;
+ event->variant.tsx.aborted = packet.bits.tsx.abrt;
+ break;
+ }
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_psbend(struct pt_query_decoder *decoder)
+{
+ int status;
+
+ if (!decoder)
+ return -pte_internal;
+
+ status = pt_qry_process_pending_psb_events(decoder);
+ if (status < 0)
+ return status;
+
+ /* If we had any psb events, we're done for now. */
+ if (status)
+ return 0;
+
+ /* Skip the psbend extended opcode that we fetched before if no more
+ * psbend events are pending.
+ */
+ decoder->pos += ptps_psbend;
+ return 0;
+}
+
+int pt_qry_decode_tsc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tsc packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_tsc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tsc packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_cbr(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_cbr packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_cbr;
+ event->variant.cbr.ratio = packet.ratio;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_header_cbr(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_cbr packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_cbr;
+ event->variant.cbr.ratio = packet.ratio;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_tma(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_tma packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_mtc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mtc packet;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal,
+ &packet, &decoder->config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
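+/* Check whether a 2-byte CYC packet may overlap an OVF packet due to
+ * erratum SKD007.
+ *
+ * Returns a positive integer if the erratum likely applies.
+ * Returns zero if it does not apply.
+ * Returns a negative pt_error_code otherwise.
+ */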
+static int check_erratum_skd007(struct pt_query_decoder *decoder,
+ const struct pt_packet_cyc *packet, int size)
+{
+ const uint8_t *pos;
+ uint16_t payload;
+
+ if (!decoder || !packet || size < 0)
+ return -pte_internal;
+
+ /* It must be a 2-byte CYC. */
+ if (size != 2)
+ return 0;
+
+ payload = (uint16_t) packet->value;
+
+ /* The 2nd byte of the CYC payload must look like an ext opcode. */
+ if ((payload & ~0x1f) != 0x20)
+ return 0;
+
+ /* Skip this CYC packet. */
+ pos = decoder->pos + size;
+ if (decoder->config.end <= pos)
+ return 0;
+
+ /* See if we got a second CYC that looks like an OVF ext opcode. */
+ if (*pos != pt_ext_ovf)
+ return 0;
+
+ /* We shouldn't get back-to-back CYCs unless they are sent when the
+ * counter wraps around. In this case, we'd expect a full payload.
+ *
+ * Since we got two non-full CYC packets, we assume the erratum hit.
+ */
+
+ return 1;
+}
+
+int pt_qry_decode_cyc(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_cyc packet;
+ struct pt_config *config;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ config = &decoder->config;
+
+ size = pt_pkt_read_cyc(&packet, decoder->pos, config);
+ if (size < 0)
+ return size;
+
+ if (config->errata.skd007) {
+ errcode = check_erratum_skd007(decoder, &packet, size);
+ if (errcode < 0)
+ return errcode;
+
+ /* If the erratum hits, we ignore the partial CYC and instead
+ * process the OVF following/overlapping it.
+ */
+ if (errcode) {
+ /* We skip the first byte of the CYC, which brings us
+ * to the beginning of the OVF packet.
+ */
+ decoder->pos += 1;
+ return 0;
+ }
+ }
+
+ errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal,
+ &packet, config);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_stop(struct pt_query_decoder *decoder)
+{
+ struct pt_event *event;
+ int errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ /* Stop events are reported immediately. */
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_stop;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += ptps_stop;
+ return 0;
+}
+
+int pt_qry_header_vmcs(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_vmcs packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_vmcs;
+ event->variant.async_vmcs.base = packet.base;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_vmcs(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_vmcs packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ /* VMCS events bind to the same IP as an in-flight async paging event.
+ *
+ * In that case, the VMCS event should be applied first. We reorder
+ * events here to simplify the life of higher layers.
+ */
+ event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging);
+ if (event) {
+ struct pt_event *paging;
+
+ paging = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!paging)
+ return -pte_nomem;
+
+ *paging = *event;
+
+ event->type = ptev_async_vmcs;
+ event->variant.async_vmcs.base = packet.base;
+
+ decoder->pos += size;
+ return 0;
+ }
+
+ /* VMCS events bind to the same TIP packet as an in-flight async
+ * branch event.
+ */
+ event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
+ if (event) {
+ event = pt_evq_enqueue(&decoder->evq, evb_tip);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_async_vmcs;
+ event->variant.async_vmcs.base = packet.base;
+
+ decoder->pos += size;
+ return 0;
+ }
+
+ /* VMCS events that do not bind to an in-flight async event are
+ * stand-alone.
+ */
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_vmcs;
+ event->variant.vmcs.base = packet.base;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_mnt(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mnt packet;
+ struct pt_event *event;
+ int size, errcode;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_mnt;
+ event->variant.mnt.payload = packet.payload;
+
+ decoder->event = event;
+
+ errcode = pt_qry_event_time(event, decoder);
+ if (errcode < 0)
+ return errcode;
+
+ decoder->pos += size;
+
+ return 0;
+}
+
+int pt_qry_header_mnt(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mnt packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_psbend);
+ if (!event)
+ return -pte_nomem;
+
+ event->type = ptev_mnt;
+ event->variant.mnt.payload = packet.payload;
+
+ decoder->pos += size;
+
+ return 0;
+}
+
+int pt_qry_decode_exstop(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_exstop packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_exstop(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ if (packet.ip) {
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_exstop;
+ } else {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_exstop;
+
+ event->ip_suppressed = 1;
+ event->variant.exstop.ip = 0ull;
+
+ decoder->event = event;
+ }
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_mwait(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_mwait packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_mwait(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_mwait;
+ event->variant.mwait.hints = packet.hints;
+ event->variant.mwait.ext = packet.ext;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_pwre(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pwre packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwre(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_pwre;
+ event->variant.pwre.state = packet.state;
+ event->variant.pwre.sub_state = packet.sub_state;
+
+ if (packet.hw)
+ event->variant.pwre.hw = 1;
+
+ decoder->event = event;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_pwrx(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_pwrx packet;
+ struct pt_event *event;
+ int size;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_pwrx(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->type = ptev_pwrx;
+ event->variant.pwrx.last = packet.last;
+ event->variant.pwrx.deepest = packet.deepest;
+
+ if (packet.interrupt)
+ event->variant.pwrx.interrupt = 1;
+ if (packet.store)
+ event->variant.pwrx.store = 1;
+ if (packet.autonomous)
+ event->variant.pwrx.autonomous = 1;
+
+ decoder->event = event;
+
+ decoder->pos += size;
+ return 0;
+}
+
+int pt_qry_decode_ptw(struct pt_query_decoder *decoder)
+{
+ struct pt_packet_ptw packet;
+ struct pt_event *event;
+ int size, pls;
+
+ if (!decoder)
+ return -pte_internal;
+
+ size = pt_pkt_read_ptw(&packet, decoder->pos, &decoder->config);
+ if (size < 0)
+ return size;
+
+ pls = pt_ptw_size(packet.plc);
+ if (pls < 0)
+ return pls;
+
+ if (packet.ip) {
+ event = pt_evq_enqueue(&decoder->evq, evb_fup);
+ if (!event)
+ return -pte_internal;
+ } else {
+ event = pt_evq_standalone(&decoder->evq);
+ if (!event)
+ return -pte_internal;
+
+ event->ip_suppressed = 1;
+
+ decoder->event = event;
+ }
+
+ event->type = ptev_ptwrite;
+ event->variant.ptwrite.size = (uint8_t) pls;
+ event->variant.ptwrite.payload = packet.payload;
+
+ decoder->pos += size;
+ return 0;
+}
diff --git a/contrib/processor-trace/libipt/src/pt_retstack.c b/contrib/processor-trace/libipt/src/pt_retstack.c
new file mode 100644
index 0000000000000..cc568367ed1a2
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_retstack.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_retstack.h"
+
+#include "intel-pt.h"
+
+
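+/* The return stack is a fixed-size ring buffer of return addresses.  The
+ * decoders push an address on calls and pop it for compressed returns;
+ * when the buffer is full, the oldest entry is overwritten.
+ *
+ * A minimal usage sketch (hypothetical caller; error handling omitted):
+ *
+ *	struct pt_retstack retstack;
+ *	uint64_t ip;
+ *
+ *	pt_retstack_init(&retstack);
+ *	pt_retstack_push(&retstack, 0x1000ull);
+ *	(void) pt_retstack_pop(&retstack, &ip);    // ip is now 0x1000
+ */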
+void pt_retstack_init(struct pt_retstack *retstack)
+{
+ if (!retstack)
+ return;
+
+ retstack->top = 0;
+ retstack->bottom = 0;
+}
+
+int pt_retstack_is_empty(const struct pt_retstack *retstack)
+{
+ if (!retstack)
+ return -pte_invalid;
+
+ return (retstack->top == retstack->bottom ? 1 : 0);
+}
+
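+/* Pop the topmost return address off @retstack into @ip, if non-NULL.
+ *
+ * Returns zero on success, -pte_retstack_empty if @retstack is empty, and
+ * -pte_invalid if @retstack is NULL.
+ */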
+int pt_retstack_pop(struct pt_retstack *retstack, uint64_t *ip)
+{
+ uint8_t top;
+
+ if (!retstack)
+ return -pte_invalid;
+
+ top = retstack->top;
+
+ if (top == retstack->bottom)
+ return -pte_retstack_empty;
+
+ top = (!top ? pt_retstack_size : top - 1);
+
+ retstack->top = top;
+
+ if (ip)
+ *ip = retstack->stack[top];
+
+ return 0;
+}
+
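+/* Push @ip onto @retstack.
+ *
+ * When the ring buffer is full, the oldest entry is overwritten by advancing
+ * @bottom together with @top.
+ *
+ * Returns zero on success, -pte_invalid if @retstack is NULL.
+ */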
+int pt_retstack_push(struct pt_retstack *retstack, uint64_t ip)
+{
+ uint8_t top, bottom;
+
+ if (!retstack)
+ return -pte_invalid;
+
+ top = retstack->top;
+ bottom = retstack->bottom;
+
+ retstack->stack[top] = ip;
+
+ top = (top == pt_retstack_size ? 0 : top + 1);
+
+ if (bottom == top)
+ bottom = (bottom == pt_retstack_size ? 0 : bottom + 1);
+
+ retstack->top = top;
+ retstack->bottom = bottom;
+
+ return 0;
+}
diff --git a/contrib/processor-trace/libipt/src/pt_section.c b/contrib/processor-trace/libipt/src/pt_section.c
new file mode 100644
index 0000000000000..77bae915fb7b7
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_section.c
@@ -0,0 +1,643 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_block_cache.h"
+#include "pt_image_section_cache.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+
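+/* Duplicate @str into a newly allocated buffer that the caller must free.
+ *
+ * Returns NULL if @str is NULL or if the allocation fails.
+ */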
+static char *dupstr(const char *str)
+{
+ char *dup;
+ size_t len;
+
+ if (!str)
+ return NULL;
+
+ len = strlen(str);
+ dup = malloc(len + 1);
+ if (!dup)
+ return NULL;
+
+ return strcpy(dup, str);
+}
+
+struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
+ uint64_t size)
+{
+ struct pt_section *section;
+ uint64_t fsize;
+ void *status;
+ int errcode;
+
+ errcode = pt_section_mk_status(&status, &fsize, filename);
+ if (errcode < 0)
+ return NULL;
+
+ /* Fail if the requested @offset lies beyond the end of @file. */
+ if (fsize <= offset)
+ goto out_status;
+
+ /* Truncate @size so the entire range lies within @file. */
+ fsize -= offset;
+ if (fsize < size)
+ size = fsize;
+
+ section = malloc(sizeof(*section));
+ if (!section)
+ goto out_status;
+
+ memset(section, 0, sizeof(*section));
+
+ section->filename = dupstr(filename);
+ section->status = status;
+ section->offset = offset;
+ section->size = size;
+ section->ucount = 1;
+
+#if defined(FEATURE_THREADS)
+
+ errcode = mtx_init(&section->lock, mtx_plain);
+ if (errcode != thrd_success) {
+ free(section->filename);
+ free(section);
+ goto out_status;
+ }
+
+ errcode = mtx_init(&section->alock, mtx_plain);
+ if (errcode != thrd_success) {
+ mtx_destroy(&section->lock);
+ free(section->filename);
+ free(section);
+ goto out_status;
+ }
+
+#endif /* defined(FEATURE_THREADS) */
+
+ return section;
+
+out_status:
+ free(status);
+ return NULL;
+}
+
+int pt_section_lock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_section_unlock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static void pt_section_free(struct pt_section *section)
+{
+ if (!section)
+ return;
+
+#if defined(FEATURE_THREADS)
+
+ mtx_destroy(&section->alock);
+ mtx_destroy(&section->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+
+ free(section->filename);
+ free(section->status);
+ free(section);
+}
+
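+/* Acquire a use reference to @section.
+ *
+ * Returns zero on success, -pte_overflow if the use count would overflow,
+ * a negative pt_error_code otherwise.
+ */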
+int pt_section_get(struct pt_section *section)
+{
+ uint16_t ucount;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = section->ucount + 1;
+ if (!ucount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->ucount = ucount;
+
+ return pt_section_unlock(section);
+}
+
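+/* Release a use reference to @section.
+ *
+ * Frees @section when the last reference is dropped.  The section must not
+ * be mapped at that point.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */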
+int pt_section_put(struct pt_section *section)
+{
+ uint16_t ucount, mcount;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+ ucount = section->ucount;
+ if (ucount > 1) {
+ section->ucount = ucount - 1;
+ return pt_section_unlock(section);
+ }
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (!ucount || mcount)
+ return -pte_internal;
+
+ pt_section_free(section);
+ return 0;
+}
+
+static int pt_section_lock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_unlock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
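+/* Attach @section to @iscache.
+ *
+ * A section can be attached to at most one image section cache at a time.
+ * Each attach is counted and must be balanced by a detach; the attach count
+ * must not exceed the use count.
+ *
+ * Returns zero on success, a negative pt_error_code otherwise.
+ */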
+int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ uint16_t acount, ucount;
+ int errcode;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = section->ucount;
+ acount = section->acount;
+ if (!acount) {
+ if (section->iscache || !ucount)
+ goto out_unlock;
+
+ section->iscache = iscache;
+ section->acount = 1;
+
+ return pt_section_unlock_attach(section);
+ }
+
+ acount += 1;
+ if (!acount) {
+ (void) pt_section_unlock_attach(section);
+ return -pte_overflow;
+ }
+
+ if (ucount < acount)
+ goto out_unlock;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ section->acount = acount;
+
+ return pt_section_unlock_attach(section);
+
+out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ uint16_t acount, ucount;
+ int errcode;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ acount = section->acount;
+ if (!acount)
+ goto out_unlock;
+
+ acount -= 1;
+ ucount = section->ucount;
+ if (ucount < acount)
+ goto out_unlock;
+
+ section->acount = acount;
+ if (!acount)
+ section->iscache = NULL;
+
+ return pt_section_unlock_attach(section);
+
+out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
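+
+/* An illustrative sketch, not part of the library: a section is attached to
+ * at most one image section cache, and every successful pt_section_attach()
+ * must be balanced by a pt_section_detach() with the same @iscache. The
+ * helper name below is hypothetical.
+ */
+#if 0
+static int example_attach_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ int errcode;
+
+ errcode = pt_section_attach(section, iscache);
+ if (errcode < 0)
+ return errcode;
+
+ /* Use @section on behalf of @iscache. */
+
+ return pt_section_detach(section, iscache);
+}
+#endif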
+
+const char *pt_section_filename(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->filename;
+}
+
+uint64_t pt_section_size(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->size;
+}
+
+static int pt_section_bcache_memsize(const struct pt_section *section,
+ uint64_t *psize)
+{
+ struct pt_block_cache *bcache;
+
+ if (!section || !psize)
+ return -pte_internal;
+
+ bcache = section->bcache;
+ if (!bcache) {
+ *psize = 0ull;
+ return 0;
+ }
+
+ *psize = sizeof(*bcache) +
+ (bcache->nentries * sizeof(struct pt_bcache_entry));
+
+ return 0;
+}
+
+static int pt_section_memsize_locked(const struct pt_section *section,
+ uint64_t *psize)
+{
+ uint64_t msize, bcsize;
+ int (*memsize)(const struct pt_section *section, uint64_t *size);
+ int errcode;
+
+ if (!section || !psize)
+ return -pte_internal;
+
+ memsize = section->memsize;
+ if (!memsize) {
+ if (section->mcount)
+ return -pte_internal;
+
+ *psize = 0ull;
+ return 0;
+ }
+
+ errcode = memsize(section, &msize);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_bcache_memsize(section, &bcsize);
+ if (errcode < 0)
+ return errcode;
+
+ *psize = msize + bcsize;
+
+ return 0;
+}
+
+int pt_section_memsize(struct pt_section *section, uint64_t *size)
+{
+ int errcode, status;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_memsize_locked(section, size);
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+uint64_t pt_section_offset(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->offset;
+}
+
+int pt_section_alloc_bcache(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ struct pt_block_cache *bcache;
+ uint64_t ssize, memsize;
+ uint32_t csize;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ if (!section->mcount)
+ return -pte_internal;
+
+ ssize = pt_section_size(section);
+ csize = (uint32_t) ssize;
+
+ if (csize != ssize)
+ return -pte_not_supported;
+
+ memsize = 0ull;
+
+ /* We need to take both the attach and the section lock in order to pair
+ * the block cache allocation and the resize notification.
+ *
+ * This allows map notifications in between but they only change the
+ * order of sections in the cache.
+ *
+ * The attach lock needs to be taken first.
+ */
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ bcache = pt_section_bcache(section);
+ if (bcache) {
+ errcode = 0;
+ goto out_lock;
+ }
+
+ bcache = pt_bcache_alloc(csize);
+ if (!bcache) {
+ errcode = -pte_nomem;
+ goto out_lock;
+ }
+
+ /* Install the block cache. It will become visible and may be used
+ * immediately.
+ *
+	 * If we fail later on, we leave the block cache in place and report
+	 * the error to the allocating decoder thread.
+ */
+ section->bcache = bcache;
+
+ errcode = pt_section_memsize_locked(section, &memsize);
+ if (errcode < 0)
+ goto out_lock;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ if (memsize) {
+ iscache = section->iscache;
+ if (iscache) {
+ errcode = pt_iscache_notify_resize(iscache, section,
+ memsize);
+ if (errcode < 0)
+ goto out_alock;
+ }
+ }
+
+ return pt_section_unlock_attach(section);
+
+out_lock:
+ (void) pt_section_unlock(section);
+
+out_alock:
+ (void) pt_section_unlock_attach(section);
+ return errcode;
+}
+
+int pt_section_on_map_lock(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ iscache = section->iscache;
+ if (!iscache)
+ return pt_section_unlock_attach(section);
+
+ /* There is a potential deadlock when @section was unmapped again and
+ * @iscache tries to map it. This would cause this function to be
+ * re-entered while we're still holding the attach lock.
+ *
+ * This scenario is very unlikely, though, since our caller does not yet
+ * know whether pt_section_map() succeeded.
+ */
+ status = pt_iscache_notify_map(iscache, section);
+
+ errcode = pt_section_unlock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+
+int pt_section_map_share(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_internal;
+ }
+
+ mcount += 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ return pt_section_unlock(section);
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+
+ errcode = -pte_nomap;
+ if (!mcount)
+ goto out_unlock;
+
+ section->mcount = mcount -= 1;
+ if (mcount)
+ return pt_section_unlock(section);
+
+ errcode = -pte_internal;
+ if (!section->unmap)
+ goto out_unlock;
+
+ status = section->unmap(section);
+
+ pt_bcache_free(section->bcache);
+ section->bcache = NULL;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
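+
+/* An illustrative sketch, not part of the library: each successful
+ * pt_section_map() must be balanced by pt_section_unmap(), and reads are
+ * only valid while the section is mapped. The helper name is hypothetical.
+ */
+#if 0
+static int example_map_read_unmap(struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ int status, errcode;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_read(section, buffer, size, offset);
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+
+ return status;
+}
+#endif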
+
+int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ uint64_t limit, space;
+
+ if (!section)
+ return -pte_internal;
+
+ if (!section->read)
+ return -pte_nomap;
+
+ limit = section->size;
+ if (limit <= offset)
+ return -pte_nomap;
+
+ /* Truncate if we try to read past the end of the section. */
+ space = limit - offset;
+ if (space < size)
+ size = (uint16_t) space;
+
+ return section->read(section, buffer, size, offset);
+}
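+
+/* For example, for a section of size 0x1000, reading 16 bytes at offset
+ * 0xffc copies the last 4 bytes and returns 4; reading at offset 0x1000 or
+ * beyond fails with -pte_nomap.
+ */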
diff --git a/contrib/processor-trace/libipt/src/pt_section_file.c b/contrib/processor-trace/libipt/src/pt_section_file.c
new file mode 100644
index 0000000000000..299a94eb7d042
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_section_file.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+static int fmap_init(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return -pte_internal;
+
+ memset(mapping, 0, sizeof(*mapping));
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_init(&mapping->lock, mtx_plain);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static void fmap_fini(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return;
+
+ fclose(mapping->file);
+
+#if defined(FEATURE_THREADS)
+
+ mtx_destroy(&mapping->lock);
+
+#endif /* defined(FEATURE_THREADS) */
+}
+
+static int fmap_lock(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&mapping->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int fmap_unlock(struct pt_sec_file_mapping *mapping)
+{
+ if (!mapping)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&mapping->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_sec_file_map(struct pt_section *section, FILE *file)
+{
+ struct pt_sec_file_mapping *mapping;
+ uint64_t offset, size;
+ long begin, end, fsize;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (mapping)
+ return -pte_internal;
+
+ offset = section->offset;
+ size = section->size;
+
+ begin = (long) offset;
+ end = begin + (long) size;
+
+ /* Check for overflows. */
+ if ((uint64_t) begin != offset)
+ return -pte_bad_image;
+
+ if ((uint64_t) end != (offset + size))
+ return -pte_bad_image;
+
+ if (end < begin)
+ return -pte_bad_image;
+
+ /* Validate that the section lies within the file. */
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode)
+ return -pte_bad_image;
+
+ fsize = ftell(file);
+ if (fsize < 0)
+ return -pte_bad_image;
+
+ if (fsize < end)
+ return -pte_bad_image;
+
+ mapping = malloc(sizeof(*mapping));
+ if (!mapping)
+ return -pte_nomem;
+
+ errcode = fmap_init(mapping);
+ if (errcode < 0)
+ goto out_mem;
+
+ mapping->file = file;
+ mapping->begin = begin;
+ mapping->end = end;
+
+ section->mapping = mapping;
+ section->unmap = pt_sec_file_unmap;
+ section->read = pt_sec_file_read;
+ section->memsize = pt_sec_file_memsize;
+
+ return 0;
+
+out_mem:
+ free(mapping);
+ return errcode;
+}
+
+int pt_sec_file_unmap(struct pt_section *section)
+{
+ struct pt_sec_file_mapping *mapping;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+
+ if (!mapping || !section->unmap || !section->read || !section->memsize)
+ return -pte_internal;
+
+ section->mapping = NULL;
+ section->unmap = NULL;
+ section->read = NULL;
+ section->memsize = NULL;
+
+ fmap_fini(mapping);
+ free(mapping);
+
+ return 0;
+}
+
+int pt_sec_file_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct pt_sec_file_mapping *mapping;
+ FILE *file;
+ long begin;
+ size_t read;
+ int errcode;
+
+ if (!buffer || !section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ file = mapping->file;
+
+ /* We already checked in pt_section_read() that the requested memory
+ * lies within the section's boundaries.
+ *
+	 * And we checked in pt_sec_file_map() that the file covers the entire
+	 * section. There's no need to check for overflows again.
+ */
+ begin = mapping->begin + (long) offset;
+
+ errcode = fmap_lock(mapping);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = fseek(file, begin, SEEK_SET);
+ if (errcode)
+ goto out_unlock;
+
+ read = fread(buffer, 1, size, file);
+
+ errcode = fmap_unlock(mapping);
+ if (errcode < 0)
+ return errcode;
+
+ return (int) read;
+
+out_unlock:
+ (void) fmap_unlock(mapping);
+ return -pte_nomap;
+}
+
+int pt_sec_file_memsize(const struct pt_section *section, uint64_t *size)
+{
+ if (!section || !size)
+ return -pte_internal;
+
+ if (!section->mapping)
+ return -pte_internal;
+
+ *size = 0ull;
+
+ return 0;
+}
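+
+/* An illustrative sketch, not part of the library: on success,
+ * pt_sec_file_map() takes ownership of @file; the file is closed again when
+ * the section is unmapped. The helper name is hypothetical.
+ */
+#if 0
+static int example_map_file(struct pt_section *section, const char *filename)
+{
+ FILE *file;
+ int errcode;
+
+ file = fopen(filename, "rb");
+ if (!file)
+ return -pte_bad_image;
+
+ errcode = pt_sec_file_map(section, file);
+ if (errcode < 0)
+ fclose(file);
+
+ return errcode;
+}
+#endif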
diff --git a/contrib/processor-trace/libipt/src/pt_sync.c b/contrib/processor-trace/libipt/src/pt_sync.c
new file mode 100644
index 0000000000000..cf604203b17b7
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_sync.c
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_sync.h"
+#include "pt_packet.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+/* A psb packet contains a unique 2-byte repeating pattern.
+ *
+ * There are only two ways to fill up a 64bit word with such a pattern.
+ */
+static const uint64_t psb_pattern[] = {
+ ((uint64_t) pt_psb_lohi | (uint64_t) pt_psb_lohi << 16 |
+ (uint64_t) pt_psb_lohi << 32 | (uint64_t) pt_psb_lohi << 48),
+ ((uint64_t) pt_psb_hilo | (uint64_t) pt_psb_hilo << 16 |
+ (uint64_t) pt_psb_hilo << 32 | (uint64_t) pt_psb_hilo << 48)
+};
+
+static const uint8_t *truncate(const uint8_t *pointer, size_t alignment)
+{
+ uintptr_t raw = (uintptr_t) pointer;
+
+ raw /= alignment;
+ raw *= alignment;
+
+ return (const uint8_t *) raw;
+}
+
+static const uint8_t *align(const uint8_t *pointer, size_t alignment)
+{
+ return truncate(pointer + alignment - 1, alignment);
+}
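+
+/* For example, with an alignment of 8, truncate() maps 0x1007 down to 0x1000
+ * and align() maps 0x1001 through 0x1008 up to 0x1008.
+ */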
+
+/* Find a psb packet given a position somewhere in the payload.
+ *
+ * Return the position of the psb packet.
+ * Return NULL if this is not a psb packet.
+ */
+static const uint8_t *pt_find_psb(const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ int errcode;
+
+ if (!pos || !config)
+ return NULL;
+
+ begin = config->begin;
+ end = config->end;
+
+ /* Navigate to the end of the psb payload pattern.
+ *
+	 * Beware that PSB is an extended opcode. We must not mistake the
+	 * extension opcode of the following packet as belonging to the PSB.
+ */
+ if (*pos != pt_psb_hi)
+ pos++;
+
+ for (; (pos + 1) < end; pos += 2) {
+ uint8_t hi, lo;
+
+ hi = pos[0];
+ lo = pos[1];
+
+ if (hi != pt_psb_hi)
+ break;
+
+ if (lo != pt_psb_lo)
+ break;
+ }
+	/* We're right after the psb payload and within the buffer.
+ * Navigate to the expected beginning of the psb packet.
+ */
+ pos -= ptps_psb;
+
+ /* Check if we're still inside the buffer. */
+ if (pos < begin)
+ return NULL;
+
+ /* Check that this is indeed a psb packet we're at. */
+ if (pos[0] != pt_opc_psb || pos[1] != pt_ext_psb)
+ return NULL;
+
+ errcode = pt_pkt_read_psb(pos, config);
+ if (errcode < 0)
+ return NULL;
+
+ return pos;
+}
+
+static int pt_sync_within_bounds(const uint8_t *pos, const uint8_t *begin,
+ const uint8_t *end)
+{
+ /* We allow @pos == @end representing the very end of the trace.
+ *
+ * This will result in -pte_eos when we actually try to read from @pos.
+ */
+ return (begin <= pos) && (pos <= end);
+}
+
+int pt_sync_set(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+ int errcode;
+
+ if (!sync || !pos || !config)
+ return -pte_internal;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pt_sync_within_bounds(pos, begin, end))
+ return -pte_eos;
+
+ if (end < pos + 2)
+ return -pte_eos;
+
+ /* Check that this is indeed a psb packet we're at. */
+ if (pos[0] != pt_opc_psb || pos[1] != pt_ext_psb)
+ return -pte_nosync;
+
+ errcode = pt_pkt_read_psb(pos, config);
+ if (errcode < 0)
+ return errcode;
+
+ *sync = pos;
+
+ return 0;
+}
+
+int pt_sync_forward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+
+ if (!sync || !pos || !config)
+ return -pte_internal;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pt_sync_within_bounds(pos, begin, end))
+ return -pte_internal;
+
+ /* We search for a full 64bit word. It's OK to skip the current one. */
+ pos = align(pos, sizeof(*psb_pattern));
+
+ /* Search for the psb payload pattern in the buffer. */
+ for (;;) {
+ const uint8_t *current = pos;
+ uint64_t val;
+
+ pos += sizeof(uint64_t);
+ if (end < pos)
+ return -pte_eos;
+
+ val = * (const uint64_t *) current;
+
+ if ((val != psb_pattern[0]) && (val != psb_pattern[1]))
+ continue;
+
+ /* We found a 64bit word's worth of psb payload pattern. */
+ current = pt_find_psb(pos, config);
+ if (!current)
+ continue;
+
+ *sync = current;
+ return 0;
+ }
+}
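+
+/* An illustrative sketch, not part of the library: enumerating all psb
+ * packets in the trace buffer by synchronizing forward repeatedly. The
+ * helper name is hypothetical.
+ */
+#if 0
+static void example_list_psbs(const struct pt_config *config)
+{
+ const uint8_t *pos = config->begin;
+
+ for (;;) {
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_forward(&sync, pos, config);
+ if (errcode < 0)
+ break; /* -pte_eos: no further psb packet. */
+
+ /* @sync points to a psb packet; continue behind it. */
+ pos = sync + ptps_psb;
+ }
+}
+#endif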
+
+int pt_sync_backward(const uint8_t **sync, const uint8_t *pos,
+ const struct pt_config *config)
+{
+ const uint8_t *begin, *end;
+
+ if (!sync || !pos || !config)
+ return -pte_internal;
+
+ begin = config->begin;
+ end = config->end;
+
+ if (!pt_sync_within_bounds(pos, begin, end))
+ return -pte_internal;
+
+ /* We search for a full 64bit word. It's OK to skip the current one. */
+ pos = truncate(pos, sizeof(*psb_pattern));
+
+ /* Search for the psb payload pattern in the buffer. */
+ for (;;) {
+ const uint8_t *next = pos;
+ uint64_t val;
+
+ pos -= sizeof(uint64_t);
+ if (pos < begin)
+ return -pte_eos;
+
+ val = * (const uint64_t *) pos;
+
+ if ((val != psb_pattern[0]) && (val != psb_pattern[1]))
+ continue;
+
+ /* We found a 64bit word's worth of psb payload pattern. */
+ next = pt_find_psb(next, config);
+ if (!next)
+ continue;
+
+ *sync = next;
+ return 0;
+ }
+}
diff --git a/contrib/processor-trace/libipt/src/pt_time.c b/contrib/processor-trace/libipt/src/pt_time.c
new file mode 100644
index 0000000000000..8c55ccde0fd66
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_time.c
@@ -0,0 +1,674 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_time.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+#include <limits.h>
+
+
+void pt_time_init(struct pt_time *time)
+{
+ if (!time)
+ return;
+
+ memset(time, 0, sizeof(*time));
+}
+
+int pt_time_query_tsc(uint64_t *tsc, uint32_t *lost_mtc,
+ uint32_t *lost_cyc, const struct pt_time *time)
+{
+ if (!tsc || !time)
+ return -pte_internal;
+
+ *tsc = time->tsc;
+
+ if (lost_mtc)
+ *lost_mtc = time->lost_mtc;
+ if (lost_cyc)
+ *lost_cyc = time->lost_cyc;
+
+ if (!time->have_tsc)
+ return -pte_no_time;
+
+ return 0;
+}
+
+int pt_time_query_cbr(uint32_t *cbr, const struct pt_time *time)
+{
+ if (!cbr || !time)
+ return -pte_internal;
+
+ if (!time->have_cbr)
+ return -pte_no_cbr;
+
+ *cbr = time->cbr;
+
+ return 0;
+}
+
+/* Compute the distance between two CTC values.
+ *
+ * We adjust a single wrap-around but fail if the distance is bigger than that.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_time_ctc_delta(uint32_t *ctc_delta, uint32_t ctc,
+ uint32_t last_ctc, const struct pt_config *config)
+{
+ if (!config || !ctc_delta)
+ return -pte_internal;
+
+ /* Correct a single wrap-around. If we lost enough MTCs to wrap
+ * around twice, timing will be wrong until the next TSC.
+ */
+ if (ctc < last_ctc) {
+ ctc += 1u << (config->mtc_freq + pt_pl_mtc_bit_size);
+
+		/* Since we only store the CTC between TMA/MTC or MTC/MTC a
+ * single correction should suffice.
+ */
+ if (ctc < last_ctc)
+ return -pte_bad_packet;
+ }
+
+ *ctc_delta = ctc - last_ctc;
+ return 0;
+}
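+
+/* For example, with mtc_freq = 3 and an 8bit MTC payload, the CTC wraps at
+ * 1u << 11 = 2048; a last_ctc of 2040 and a ctc of 8 yield a delta of
+ * 8 + 2048 - 2040 = 16.
+ */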
+
+/* Translate CTC into the same unit as the FastCounter by multiplying with P.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_time_ctc_fc(uint64_t *fc, uint64_t ctc,
+ const struct pt_config *config)
+{
+ uint32_t eax, ebx;
+
+ if (!fc || !config)
+ return -pte_internal;
+
+ eax = config->cpuid_0x15_eax;
+ ebx = config->cpuid_0x15_ebx;
+
+ /* Neither multiply nor divide by zero. */
+ if (!eax || !ebx)
+ return -pte_bad_config;
+
+ *fc = (ctc * ebx) / eax;
+ return 0;
+}
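+
+/* For example, with cpuid[0x15].eax = 2 and cpuid[0x15].ebx = 84, i.e. a TSC
+ * to crystal clock ratio of 42, a CTC delta of 10 translates into a
+ * FastCounter delta of (10 * 84) / 2 = 420.
+ */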
+
+int pt_time_update_tsc(struct pt_time *time,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!time || !packet)
+ return -pte_internal;
+
+ time->have_tsc = 1;
+ time->have_tma = 0;
+ time->have_mtc = 0;
+ time->tsc = time->base = packet->tsc;
+ time->ctc = 0;
+ time->fc = 0ull;
+
+ /* We got the full time; we recover from previous losses. */
+ time->lost_mtc = 0;
+ time->lost_cyc = 0;
+
+ return 0;
+}
+
+int pt_time_update_cbr(struct pt_time *time,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!time || !packet)
+ return -pte_internal;
+
+ time->have_cbr = 1;
+ time->cbr = packet->ratio;
+
+ return 0;
+}
+
+int pt_time_update_tma(struct pt_time *time,
+ const struct pt_packet_tma *packet,
+ const struct pt_config *config)
+{
+ uint32_t ctc, mtc_freq, mtc_hi, ctc_mask;
+ uint64_t fc;
+
+ if (!time || !packet || !config)
+ return -pte_internal;
+
+ /* Without a TSC something is seriously wrong. */
+ if (!time->have_tsc)
+ return -pte_bad_context;
+
+ /* We shouldn't have more than one TMA per TSC. */
+ if (time->have_tma)
+ return -pte_bad_context;
+
+ /* We're ignoring MTC between TSC and TMA. */
+ if (time->have_mtc)
+ return -pte_internal;
+
+ ctc = packet->ctc;
+ fc = packet->fc;
+
+ mtc_freq = config->mtc_freq;
+ mtc_hi = mtc_freq + pt_pl_mtc_bit_size;
+
+ /* A mask for the relevant CTC bits ignoring high-order bits that are
+ * not provided by MTC.
+ */
+ ctc_mask = (1u << mtc_hi) - 1u;
+
+ time->have_tma = 1;
+ time->base -= fc;
+ time->fc += fc;
+
+ /* If the MTC frequency is low enough that TMA provides the full CTC
+ * value, we can use the TMA as an MTC.
+ *
+ * If it isn't, we will estimate the preceding MTC based on the CTC bits
+ * the TMA provides at the next MTC. We forget about the previous MTC
+ * in this case.
+ *
+ * If no MTC packets are dropped around TMA, we will estimate the
+ * forgotten value again at the next MTC.
+ *
+ * If MTC packets are dropped, we can't really tell where in this
+ * extended MTC period the TSC occurred. The estimation will place it
+ * right before the next MTC.
+ */
+ if (mtc_hi <= pt_pl_tma_ctc_bit_size)
+ time->have_mtc = 1;
+
+ /* In both cases, we store the TMA's CTC bits until the next MTC. */
+ time->ctc = time->ctc_cyc = ctc & ctc_mask;
+
+ return 0;
+}
+
+int pt_time_update_mtc(struct pt_time *time,
+ const struct pt_packet_mtc *packet,
+ const struct pt_config *config)
+{
+ uint32_t last_ctc, ctc, ctc_delta;
+ uint64_t tsc, base;
+ uint8_t mtc_freq;
+ int errcode, have_tsc, have_tma, have_mtc;
+
+ if (!time || !packet || !config)
+ return -pte_internal;
+
+ have_tsc = time->have_tsc;
+ have_tma = time->have_tma;
+ have_mtc = time->have_mtc;
+
+ /* We ignore MTCs between TSC and TMA to avoid apparent CTC overflows.
+ *
+ * Later MTCs will ensure that no time is lost - provided TMA provides
+ * enough bits. If TMA doesn't provide any of the MTC bits we may place
+ * the TSC into the wrong MTC period.
+ */
+ if (have_tsc && !have_tma)
+ return 0;
+
+ base = time->base;
+ last_ctc = time->ctc;
+ mtc_freq = config->mtc_freq;
+
+ ctc = packet->ctc << mtc_freq;
+
+ /* Store our CTC value if we have or would have reset FC. */
+ if (time->fc || time->lost_cyc || !have_mtc)
+ time->ctc_cyc = ctc;
+
+ /* Prepare for the next packet in case we error out below. */
+ time->have_mtc = 1;
+ time->fc = 0ull;
+ time->ctc = ctc;
+
+ /* We recover from previous CYC losses. */
+ time->lost_cyc = 0;
+
+ /* Avoid a big jump when we see the first MTC with an arbitrary CTC
+ * payload.
+ */
+ if (!have_mtc) {
+ uint32_t ctc_lo, ctc_hi;
+
+ /* If we have not seen a TMA, we ignore this first MTC.
+ *
+ * We have no idea where in this MTC period tracing started.
+ * We could lose an entire MTC period or just a tiny fraction.
+ *
+ * On the other hand, if we assumed a previous MTC value, we
+ * might make just the same error.
+ */
+ if (!have_tma)
+ return 0;
+
+ /* The TMA's CTC value didn't provide enough bits - otherwise,
+ * we would have treated the TMA as an MTC.
+ */
+ if (last_ctc & ~pt_pl_tma_ctc_mask)
+ return -pte_internal;
+
+ /* Split this MTC's CTC value into low and high parts with
+ * respect to the bits provided by TMA.
+ */
+ ctc_lo = ctc & pt_pl_tma_ctc_mask;
+ ctc_hi = ctc & ~pt_pl_tma_ctc_mask;
+
+ /* We estimate the high-order CTC bits that are not provided by
+ * TMA based on the CTC bits provided by this MTC.
+ *
+ * We assume that no MTC packets were dropped around TMA. If
+ * there are, we might place the TSC into the wrong MTC period
+ * depending on how many CTC bits TMA provides and how many MTC
+ * packets were dropped.
+ *
+ * Note that we may underflow which results in more bits to be
+ * set than MTC packets may provide. Drop those extra bits.
+ */
+ if (ctc_lo < last_ctc) {
+ ctc_hi -= 1u << pt_pl_tma_ctc_bit_size;
+ ctc_hi &= pt_pl_mtc_mask << mtc_freq;
+ }
+
+ last_ctc |= ctc_hi;
+ }
+
+ errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
+ if (errcode < 0) {
+ time->lost_mtc += 1;
+ return errcode;
+ }
+
+ errcode = pt_time_ctc_fc(&tsc, ctc_delta, config);
+ if (errcode < 0)
+ return errcode;
+
+ base += tsc;
+ time->tsc = time->base = base;
+
+ return 0;
+}
+
+/* Adjust a CYC packet's payload spanning multiple MTC periods.
+ *
+ * CYC packets measure the Fast Counter since the last CYC(-eligible) packet.
+ * Depending on the CYC threshold, we may not get a CYC for each MTC, so a CYC
+ * period may overlap with or even span multiple MTC periods.
+ *
+ * We can't do much about the overlap case without examining all packets in
+ * the respective periods. We leave this as expected imprecision.
+ *
+ * If we find a CYC packet to span multiple MTC packets, though, we try to
+ * approximate the portion for the current MTC period by subtracting the
+ * estimated portion for previous MTC periods using calibration information.
+ *
+ * We only consider MTC. For the first CYC after TSC, the corresponding TMA
+ * will contain the Fast Counter at TSC.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int pt_time_adjust_cyc(uint64_t *cyc, const struct pt_time *time,
+ const struct pt_config *config, uint64_t fcr)
+{
+ uint32_t last_ctc, ctc, ctc_delta;
+ uint64_t fc, total_cyc, old_cyc;
+ int errcode;
+
+ if (!time || !config || !fcr)
+ return -pte_internal;
+
+ last_ctc = time->ctc_cyc;
+ ctc = time->ctc;
+
+ /* There is nothing to do if this is the current MTC period. */
+ if (ctc == last_ctc)
+ return 0;
+
+ /* Calibration computes
+ *
+ * fc = (ctc_delta * cpuid[0x15].ebx) / cpuid[0x15].eax.
+ * fcr = (fc << pt_tcal_fcr_shr) / cyc
+ *
+ * So cyc = (fc << pt_tcal_fcr_shr) / fcr.
+ */
+
+ errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_time_ctc_fc(&fc, ctc_delta, config);
+ if (errcode < 0)
+ return errcode;
+
+ old_cyc = (fc << pt_tcal_fcr_shr) / fcr;
+ total_cyc = *cyc;
+
+ /* Make sure we don't wrap around. If we would, attribute the entire
+	 * CYC payload to the preceding MTC period(s).
+ *
+ * We lost an unknown portion of the CYC payload for the current MTC
+ * period, but it's usually better to run too slow than too fast.
+ */
+ if (total_cyc < old_cyc)
+ total_cyc = old_cyc;
+
+ *cyc = total_cyc - old_cyc;
+ return 0;
+}
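+
+/* For example, with a calibration of two FastCounter ticks per cycle, i.e.
+ * fcr = 2ull << pt_tcal_fcr_shr, an estimated fc of 100 for previous MTC
+ * periods accounts for (100 << pt_tcal_fcr_shr) / fcr = 50 cycles, which are
+ * subtracted from the CYC payload.
+ */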
+
+int pt_time_update_cyc(struct pt_time *time,
+ const struct pt_packet_cyc *packet,
+ const struct pt_config *config, uint64_t fcr)
+{
+ uint64_t cyc, fc;
+
+ if (!time || !packet || !config)
+ return -pte_internal;
+
+ if (!fcr) {
+ time->lost_cyc += 1;
+ return 0;
+ }
+
+ cyc = packet->value;
+ fc = time->fc;
+ if (!fc) {
+ int errcode;
+
+ errcode = pt_time_adjust_cyc(&cyc, time, config, fcr);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ fc += (cyc * fcr) >> pt_tcal_fcr_shr;
+
+ time->fc = fc;
+ time->tsc = time->base + fc;
+
+ return 0;
+}
+
+void pt_tcal_init(struct pt_time_cal *tcal)
+{
+ if (!tcal)
+ return;
+
+ memset(tcal, 0, sizeof(*tcal));
+
+ tcal->min_fcr = UINT64_MAX;
+}
+
+static int pt_tcal_have_fcr(const struct pt_time_cal *tcal)
+{
+ if (!tcal)
+ return 0;
+
+ return (tcal->min_fcr <= tcal->max_fcr);
+}
+
+int pt_tcal_fcr(uint64_t *fcr, const struct pt_time_cal *tcal)
+{
+ if (!fcr || !tcal)
+ return -pte_internal;
+
+ if (!pt_tcal_have_fcr(tcal))
+ return -pte_no_time;
+
+ *fcr = tcal->fcr;
+
+ return 0;
+}
+
+int pt_tcal_set_fcr(struct pt_time_cal *tcal, uint64_t fcr)
+{
+ if (!tcal)
+ return -pte_internal;
+
+ tcal->fcr = fcr;
+
+ if (fcr < tcal->min_fcr)
+ tcal->min_fcr = fcr;
+
+ if (fcr > tcal->max_fcr)
+ tcal->max_fcr = fcr;
+
+ return 0;
+}
+
+int pt_tcal_update_tsc(struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ (void) config;
+
+ if (!tcal || !packet)
+ return -pte_internal;
+
+ /* A TSC outside of PSB+ may indicate loss of time. We do not use it
+ * for calibration. We store the TSC value for calibration at the next
+ * TSC in PSB+, though.
+ */
+ tcal->tsc = packet->tsc;
+ tcal->cyc_tsc = 0ull;
+
+ return 0;
+}
+
+int pt_tcal_header_tsc(struct pt_time_cal *tcal,
+ const struct pt_packet_tsc *packet,
+ const struct pt_config *config)
+{
+ uint64_t tsc, last_tsc, tsc_delta, cyc, fcr;
+
+ (void) config;
+
+ if (!tcal || !packet)
+ return -pte_internal;
+
+ last_tsc = tcal->tsc;
+ cyc = tcal->cyc_tsc;
+
+ tsc = packet->tsc;
+
+ tcal->tsc = tsc;
+ tcal->cyc_tsc = 0ull;
+
+ if (!last_tsc || !cyc)
+ return 0;
+
+ /* Correct a single wrap-around. */
+ if (tsc < last_tsc) {
+ tsc += 1ull << pt_pl_tsc_bit_size;
+
+ if (tsc < last_tsc)
+ return -pte_bad_packet;
+ }
+
+ tsc_delta = tsc - last_tsc;
+
+	/* We shift the numerator to improve rounding precision.
+ *
+ * Since we're only collecting the CYCs between two TSC, we shouldn't
+ * overflow. Let's rather fail than overflow.
+ */
+ if (tsc_delta & ~(~0ull >> pt_tcal_fcr_shr))
+ return -pte_internal;
+
+ fcr = (tsc_delta << pt_tcal_fcr_shr) / cyc;
+
+ return pt_tcal_set_fcr(tcal, fcr);
+}
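+
+/* For example, a TSC delta of 420 with 210 CYCs collected between two PSB+
+ * headers yields fcr = (420 << pt_tcal_fcr_shr) / 210, i.e. two FastCounter
+ * ticks per cycle.
+ */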
+
+int pt_tcal_update_cbr(struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ /* A CBR outside of PSB+ indicates a frequency change. Reset our
+ * calibration state.
+ */
+ pt_tcal_init(tcal);
+
+ return pt_tcal_header_cbr(tcal, packet, config);
+}
+
+int pt_tcal_header_cbr(struct pt_time_cal *tcal,
+ const struct pt_packet_cbr *packet,
+ const struct pt_config *config)
+{
+ uint64_t cbr, p1, fcr;
+
+ if (!tcal || !packet || !config)
+ return -pte_internal;
+
+ p1 = config->nom_freq;
+ if (!p1)
+ return 0;
+
+ /* If we know the nominal frequency, we can use it for calibration. */
+ cbr = packet->ratio;
+
+ fcr = (p1 << pt_tcal_fcr_shr) / cbr;
+
+ return pt_tcal_set_fcr(tcal, fcr);
+}
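+
+/* For example, with a nominal frequency of 42 and a core:bus ratio of 21,
+ * fcr = (42 << pt_tcal_fcr_shr) / 21, i.e. two FastCounter ticks per cycle.
+ */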
+
+int pt_tcal_update_tma(struct pt_time_cal *tcal,
+ const struct pt_packet_tma *packet,
+ const struct pt_config *config)
+{
+ (void) tcal;
+ (void) packet;
+ (void) config;
+
+ /* Nothing to do. */
+ return 0;
+}
+
+int pt_tcal_update_mtc(struct pt_time_cal *tcal,
+ const struct pt_packet_mtc *packet,
+ const struct pt_config *config)
+{
+ uint32_t last_ctc, ctc, ctc_delta, have_mtc;
+ uint64_t cyc, fc, fcr;
+ int errcode;
+
+ if (!tcal || !packet || !config)
+ return -pte_internal;
+
+ last_ctc = tcal->ctc;
+ have_mtc = tcal->have_mtc;
+ cyc = tcal->cyc_mtc;
+
+ ctc = packet->ctc << config->mtc_freq;
+
+ /* We need at least two MTC (including this). */
+ if (!have_mtc) {
+ tcal->cyc_mtc = 0ull;
+ tcal->ctc = ctc;
+ tcal->have_mtc = 1;
+
+ return 0;
+ }
+
+ /* Without any cycles, we can't calibrate. Try again at the next
+ * MTC and distribute the cycles over the combined MTC period.
+ */
+ if (!cyc)
+ return 0;
+
+ /* Prepare for the next packet in case we error out below. */
+ tcal->have_mtc = 1;
+ tcal->cyc_mtc = 0ull;
+ tcal->ctc = ctc;
+
+ /* Let's pretend we will fail. We'll correct it at the end. */
+ tcal->lost_mtc += 1;
+
+ errcode = pt_time_ctc_delta(&ctc_delta, ctc, last_ctc, config);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_time_ctc_fc(&fc, ctc_delta, config);
+ if (errcode < 0)
+ return errcode;
+
+	/* We shift the numerator to improve rounding precision.
+ *
+ * Since we're only collecting the CYCs between two MTC, we shouldn't
+ * overflow. Let's rather fail than overflow.
+ */
+ if (fc & ~(~0ull >> pt_tcal_fcr_shr))
+ return -pte_internal;
+
+ fcr = (fc << pt_tcal_fcr_shr) / cyc;
+
+ errcode = pt_tcal_set_fcr(tcal, fcr);
+ if (errcode < 0)
+ return errcode;
+
+ /* We updated the FCR. This recovers from previous MTC losses. */
+ tcal->lost_mtc = 0;
+
+ return 0;
+}
+
+int pt_tcal_update_cyc(struct pt_time_cal *tcal,
+ const struct pt_packet_cyc *packet,
+ const struct pt_config *config)
+{
+ uint64_t cyc;
+
+ (void) config;
+
+ if (!tcal || !packet)
+ return -pte_internal;
+
+ cyc = packet->value;
+ tcal->cyc_mtc += cyc;
+ tcal->cyc_tsc += cyc;
+
+ return 0;
+}
diff --git a/contrib/processor-trace/libipt/src/pt_tnt_cache.c b/contrib/processor-trace/libipt/src/pt_tnt_cache.c
new file mode 100644
index 0000000000000..453663063c7fe
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_tnt_cache.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_tnt_cache.h"
+
+#include "intel-pt.h"
+
+
+void pt_tnt_cache_init(struct pt_tnt_cache *cache)
+{
+ if (!cache)
+ return;
+
+ cache->tnt = 0ull;
+ cache->index = 0ull;
+}
+
+int pt_tnt_cache_is_empty(const struct pt_tnt_cache *cache)
+{
+ if (!cache)
+ return -pte_invalid;
+
+ return cache->index == 0;
+}
+
+int pt_tnt_cache_query(struct pt_tnt_cache *cache)
+{
+ int taken;
+
+ if (!cache)
+ return -pte_invalid;
+
+ if (!cache->index)
+ return -pte_bad_query;
+
+ taken = (cache->tnt & cache->index) != 0;
+ cache->index >>= 1;
+
+ return taken;
+}
+
+int pt_tnt_cache_update_tnt(struct pt_tnt_cache *cache,
+ const struct pt_packet_tnt *packet,
+ const struct pt_config *config)
+{
+ uint8_t bit_size;
+
+ (void) config;
+
+ if (!cache || !packet)
+ return -pte_invalid;
+
+ if (cache->index)
+ return -pte_bad_context;
+
+ bit_size = packet->bit_size;
+ if (!bit_size)
+ return -pte_bad_packet;
+
+ cache->tnt = packet->payload;
+ cache->index = 1ull << (bit_size - 1);
+
+ return 0;
+}
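+
+/* An illustrative sketch, not part of the library: draining a TNT packet's
+ * branches in trace order, one pt_tnt_cache_query() call per conditional
+ * branch. The helper name is hypothetical.
+ */
+#if 0
+static void example_drain_tnt(struct pt_tnt_cache *cache,
+ const struct pt_packet_tnt *packet,
+ const struct pt_config *config)
+{
+ if (pt_tnt_cache_update_tnt(cache, packet, config) < 0)
+ return;
+
+ while (!pt_tnt_cache_is_empty(cache)) {
+ int taken = pt_tnt_cache_query(cache);
+
+ /* @taken is 1 for a taken and 0 for a not-taken branch. */
+ (void) taken;
+ }
+}
+#endif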
diff --git a/contrib/processor-trace/libipt/src/pt_version.c b/contrib/processor-trace/libipt/src/pt_version.c
new file mode 100644
index 0000000000000..09d79573e9116
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/pt_version.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "intel-pt.h"
+
+
+struct pt_version pt_library_version(void)
+{
+ struct pt_version v = {
+ /* .major = */ PT_VERSION_MAJOR,
+ /* .minor = */ PT_VERSION_MINOR,
+ /* .reserved = */ 0,
+ /* .build = */ PT_VERSION_BUILD,
+ /* .ext = */ PT_VERSION_EXT
+ };
+
+ return v;
+}
diff --git a/contrib/processor-trace/libipt/src/windows/init.c b/contrib/processor-trace/libipt/src/windows/init.c
new file mode 100644
index 0000000000000..f679be2746f83
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/windows/init.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_ild.h"
+
+#include <windows.h>
+
+
+BOOLEAN WINAPI DllMain(HINSTANCE handle, DWORD reason, LPVOID reserved)
+{
+ (void) handle;
+ (void) reserved;
+
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+		/* Initialize the Intel(R) Processor Trace instruction
+		 * decoder.
+		 */
+ pt_ild_init();
+ break;
+
+ default:
+ break;
+ }
+
+ return TRUE;
+}
diff --git a/contrib/processor-trace/libipt/src/windows/pt_cpuid.c b/contrib/processor-trace/libipt/src/windows/pt_cpuid.c
new file mode 100644
index 0000000000000..40013b92fa519
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/windows/pt_cpuid.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_cpuid.h"
+
+#include <intrin.h>
+
+extern void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx,
+ uint32_t *ecx, uint32_t *edx)
+{
+ int cpu_info[4];
+
+ __cpuid(cpu_info, leaf);
+ *eax = cpu_info[0];
+ *ebx = cpu_info[1];
+ *ecx = cpu_info[2];
+ *edx = cpu_info[3];
+}
diff --git a/contrib/processor-trace/libipt/src/windows/pt_section_windows.c b/contrib/processor-trace/libipt/src/windows/pt_section_windows.c
new file mode 100644
index 0000000000000..73b447a44dafd
--- /dev/null
+++ b/contrib/processor-trace/libipt/src/windows/pt_section_windows.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_windows.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <io.h>
+
+
+static int pt_sec_windows_fstat(const char *filename, struct _stat *stat)
+{
+ int fd, errcode;
+
+ if (!filename || !stat)
+ return -pte_internal;
+
+ fd = _open(filename, _O_RDONLY);
+ if (fd == -1)
+ return -pte_bad_image;
+
+ errcode = _fstat(fd, stat);
+
+ _close(fd);
+
+ if (errcode)
+ return -pte_bad_image;
+
+ return 0;
+}
+
+int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename)
+{
+ struct pt_sec_windows_status *status;
+ struct _stat stat;
+ int errcode;
+
+ if (!pstatus || !psize)
+ return -pte_internal;
+
+ errcode = pt_sec_windows_fstat(filename, &stat);
+ if (errcode < 0)
+ return errcode;
+
+ if (stat.st_size < 0)
+ return -pte_bad_image;
+
+ status = malloc(sizeof(*status));
+ if (!status)
+ return -pte_nomem;
+
+ status->stat = stat;
+
+ *pstatus = status;
+ *psize = stat.st_size;
+
+ return 0;
+}
+
+static int check_file_status(struct pt_section *section, int fd)
+{
+ struct pt_sec_windows_status *status;
+ struct _stat stat;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = _fstat(fd, &stat);
+ if (errcode)
+ return -pte_bad_image;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ if (stat.st_size != status->stat.st_size)
+ return -pte_bad_image;
+
+ if (stat.st_mtime != status->stat.st_mtime)
+ return -pte_bad_image;
+
+ return 0;
+}
+
+static DWORD granularity(void)
+{
+ struct _SYSTEM_INFO sysinfo;
+
+ GetSystemInfo(&sysinfo);
+
+ return sysinfo.dwAllocationGranularity;
+}
+
+int pt_sec_windows_map(struct pt_section *section, int fd)
+{
+ struct pt_sec_windows_mapping *mapping;
+ uint64_t offset, size, adjustment;
+ HANDLE fh, mh;
+ DWORD dsize;
+ uint8_t *base;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ offset = section->offset;
+ size = section->size;
+
+ adjustment = offset % granularity();
+
+ offset -= adjustment;
+ size += adjustment;
+
+	/* The section is supposed to fit into the file, so we shouldn't
+	 * see any overflows here.
+	 */
+ if (size < section->size)
+ return -pte_internal;
+
+ dsize = (DWORD) size;
+ if ((uint64_t) dsize != size)
+ return -pte_internal;
+
+ fh = (HANDLE) _get_osfhandle(fd);
+
+ mh = CreateFileMapping(fh, NULL, PAGE_READONLY, 0, 0, NULL);
+ if (!mh)
+ return -pte_bad_image;
+
+ base = MapViewOfFile(mh, FILE_MAP_READ, (DWORD) (offset >> 32),
+ (DWORD) (uint32_t) offset, dsize);
+ if (!base) {
+ errcode = -pte_bad_image;
+ goto out_mh;
+ }
+
+ mapping = malloc(sizeof(*mapping));
+ if (!mapping) {
+ errcode = -pte_nomem;
+ goto out_map;
+ }
+
+ mapping->fd = fd;
+ mapping->mh = mh;
+ mapping->base = base;
+ mapping->begin = base + adjustment;
+ mapping->end = base + size;
+
+ section->mapping = mapping;
+ section->unmap = pt_sec_windows_unmap;
+ section->read = pt_sec_windows_read;
+ section->memsize = pt_sec_windows_memsize;
+
+ return 0;
+
+out_map:
+ UnmapViewOfFile(base);
+
+out_mh:
+ CloseHandle(mh);
+ return errcode;
+}
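+
+/* For example, with an allocation granularity of 0x10000, a section at file
+ * offset 0x12345 is mapped at offset 0x10000 with an adjustment of 0x2345;
+ * the section data then starts at base + 0x2345.
+ */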
+
+static int pt_sec_windows_map_success(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_on_map(section);
+ if (status < 0) {
+ /* We had to release the section lock for pt_section_on_map() so
+ * @section may have meanwhile been mapped by other threads.
+ *
+ * We still want to return the error so we release our mapping.
+ * Our caller does not yet know whether pt_section_map()
+ * succeeded.
+ */
+ (void) pt_section_unmap(section);
+ return status;
+ }
+
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ const char *filename;
+ HANDLE fh;
+ FILE *file;
+ int fd, errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->mcount)
+ return pt_sec_windows_map_success(section);
+
+ if (section->mapping) {
+ errcode = -pte_internal;
+ goto out_unlock;
+ }
+
+ filename = section->filename;
+ if (!filename) {
+ errcode = -pte_internal;
+ goto out_unlock;
+ }
+
+ fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fh == INVALID_HANDLE_VALUE) {
+		/* We failed to open the file with read sharing. Let's retry
+		 * with write sharing; maybe our user has the file open for
+		 * writing.
+ *
+ * We will detect changes to the file via fstat().
+ */
+
+ fh = CreateFile(filename, GENERIC_READ, FILE_SHARE_WRITE, NULL,
+ OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fh == INVALID_HANDLE_VALUE) {
+ errcode = -pte_bad_image;
+ goto out_unlock;
+ }
+ }
+
+ fd = _open_osfhandle((intptr_t) fh, _O_RDONLY);
+ if (fd == -1) {
+ errcode = -pte_bad_image;
+ goto out_fh;
+ }
+
+ errcode = check_file_status(section, fd);
+ if (errcode < 0) {
+ errcode = -pte_bad_image;
+ goto out_fd;
+ }
+
+ /* We leave the file open on success. It will be closed when the
+ * section is unmapped.
+ */
+ errcode = pt_sec_windows_map(section, fd);
+ if (!errcode)
+ return pt_sec_windows_map_success(section);
+
+ /* Fall back to file based sections - report the original error
+ * if we fail to convert the file descriptor.
+ */
+ file = _fdopen(fd, "rb");
+ if (!file) {
+ errcode = -pte_bad_image;
+ goto out_fd;
+ }
+
+ /* We need to keep the file open on success. It will be closed when
+ * the section is unmapped.
+ */
+ errcode = pt_sec_file_map(section, file);
+ if (!errcode)
+ return pt_sec_windows_map_success(section);
+
+ fclose(file);
+ goto out_unlock;
+
+out_fd:
+ _close(fd);
+ return errcode;
+
+out_fh:
+ CloseHandle(fh);
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
+
+int pt_sec_windows_unmap(struct pt_section *section)
+{
+ struct pt_sec_windows_mapping *mapping;
+
+ if (!section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping || !section->unmap || !section->read || !section->memsize)
+ return -pte_internal;
+
+ section->mapping = NULL;
+ section->unmap = NULL;
+ section->read = NULL;
+ section->memsize = NULL;
+
+ UnmapViewOfFile(mapping->begin);
+ CloseHandle(mapping->mh);
+ _close(mapping->fd);
+ free(mapping);
+
+ return 0;
+}
+
+int pt_sec_windows_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct pt_sec_windows_mapping *mapping;
+ const uint8_t *begin;
+
+ if (!buffer || !section)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ /* We already checked in pt_section_read() that the requested memory
+ * lies within the section's boundaries.
+ *
+ * And we checked that the entire section was mapped. There's no need
+	 * to check for overflows again.
+ */
+ begin = mapping->begin + offset;
+
+ memcpy(buffer, begin, size);
+ return (int) size;
+}
+
+int pt_sec_windows_memsize(const struct pt_section *section, uint64_t *size)
+{
+ struct pt_sec_windows_mapping *mapping;
+ const uint8_t *begin, *end;
+
+ if (!section || !size)
+ return -pte_internal;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_internal;
+
+ begin = mapping->base;
+ end = mapping->end;
+
+ if (!begin || !end || end < begin)
+ return -pte_internal;
+
+ *size = (uint64_t) (end - begin);
+
+ return 0;
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-asid.c b/contrib/processor-trace/libipt/test/src/ptunit-asid.c
new file mode 100644
index 0000000000000..5622fa64f7138
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-asid.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_asid.h"
+
+#include "intel-pt.h"
+
+#include <stddef.h>
+#include <string.h>
+
+
+static struct ptunit_result from_user_null(void)
+{
+ struct pt_asid user;
+ int errcode;
+
+ pt_asid_init(&user);
+
+ errcode = pt_asid_from_user(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_from_user(NULL, &user);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_default(void)
+{
+ struct pt_asid asid;
+ int errcode;
+
+ errcode = pt_asid_from_user(&asid, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, pt_asid_no_cr3);
+ ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_small(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = sizeof(user.size);
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, pt_asid_no_cr3);
+ ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_big(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = sizeof(user) + 4;
+ user.cr3 = 0x4200ull;
+ user.vmcs = 0x23000ull;
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, 0x4200ull);
+ ptu_uint_eq(asid.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = sizeof(user);
+ user.cr3 = 0x4200ull;
+ user.vmcs = 0x23000ull;
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, 0x4200ull);
+ ptu_uint_eq(asid.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_cr3(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ user.size = offsetof(struct pt_asid, vmcs);
+ user.cr3 = 0x4200ull;
+ user.vmcs = 0x23000ull;
+
+ errcode = pt_asid_from_user(&asid, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(asid.size, sizeof(asid));
+ ptu_uint_eq(asid.cr3, 0x4200ull);
+ ptu_uint_eq(asid.vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_null(void)
+{
+ struct pt_asid asid;
+ int errcode;
+
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_to_user(NULL, NULL, sizeof(asid));
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_to_user(NULL, &asid, sizeof(asid));
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_too_small(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_to_user(&user, &asid, 0);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user.size) - 1);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_small(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user.size));
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, sizeof(user.size));
+ ptu_uint_eq(user.cr3, 0xccccccccccccccccull);
+ ptu_uint_eq(user.vmcs, 0xccccccccccccccccull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_big(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+ asid.cr3 = 0x4200ull;
+ asid.vmcs = 0x23000ull;
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user) + 8);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, sizeof(asid));
+ ptu_uint_eq(user.cr3, 0x4200ull);
+ ptu_uint_eq(user.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+ asid.cr3 = 0x4200ull;
+ asid.vmcs = 0x23000ull;
+
+ errcode = pt_asid_to_user(&user, &asid, sizeof(user));
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, sizeof(asid));
+ ptu_uint_eq(user.cr3, 0x4200ull);
+ ptu_uint_eq(user.vmcs, 0x23000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result to_user_cr3(void)
+{
+ struct pt_asid asid, user;
+ int errcode;
+
+ memset(&user, 0xcc, sizeof(user));
+ pt_asid_init(&asid);
+ asid.cr3 = 0x4200ull;
+
+ errcode = pt_asid_to_user(&user, &asid, offsetof(struct pt_asid, vmcs));
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(user.size, offsetof(struct pt_asid, vmcs));
+ ptu_uint_eq(user.cr3, 0x4200ull);
+ ptu_uint_eq(user.vmcs, 0xccccccccccccccccull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_null(void)
+{
+ struct pt_asid asid;
+ int errcode;
+
+ pt_asid_init(&asid);
+
+ errcode = pt_asid_match(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_match(NULL, &asid);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_asid_match(&asid, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_default(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ lhs.cr3 = 0x2300ull;
+ lhs.vmcs = 0x42000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ errcode = pt_asid_match(&rhs, &lhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_default_mixed(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ lhs.cr3 = 0x2300ull;
+ rhs.vmcs = 0x42000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ errcode = pt_asid_match(&rhs, &lhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_cr3(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.cr3 = 0x2300ull;
+ rhs.cr3 = 0x2300ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_vmcs(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.vmcs = 0x23000ull;
+ rhs.vmcs = 0x23000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.cr3 = 0x2300ull;
+ rhs.cr3 = 0x2300ull;
+ lhs.vmcs = 0x23000ull;
+ rhs.vmcs = 0x23000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_cr3_false(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.cr3 = 0x4200ull;
+ rhs.cr3 = 0x2300ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result match_vmcs_false(void)
+{
+ struct pt_asid lhs, rhs;
+ int errcode;
+
+ pt_asid_init(&lhs);
+ pt_asid_init(&rhs);
+
+ lhs.vmcs = 0x42000ull;
+ rhs.vmcs = 0x23000ull;
+
+ errcode = pt_asid_match(&lhs, &rhs);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, from_user_null);
+ ptu_run(suite, from_user_default);
+ ptu_run(suite, from_user_small);
+ ptu_run(suite, from_user_big);
+ ptu_run(suite, from_user);
+ ptu_run(suite, from_user_cr3);
+
+ ptu_run(suite, to_user_null);
+ ptu_run(suite, to_user_too_small);
+ ptu_run(suite, to_user_small);
+ ptu_run(suite, to_user_big);
+ ptu_run(suite, to_user);
+ ptu_run(suite, to_user_cr3);
+
+ ptu_run(suite, match_null);
+ ptu_run(suite, match_default);
+ ptu_run(suite, match_default_mixed);
+ ptu_run(suite, match_cr3);
+ ptu_run(suite, match_vmcs);
+ ptu_run(suite, match);
+ ptu_run(suite, match_cr3_false);
+ ptu_run(suite, match_vmcs_false);
+
+ return ptunit_report(&suite);
+}
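+
+/* An illustrative sketch, not part of the imported sources: the from_user
+ * tests above exercise pt_asid's size-based compatibility scheme.  The
+ * copy rules they pin down reduce to roughly this:
+ */
+#if 0
+static void asid_copy_rules(struct pt_asid *asid, const struct pt_asid *user)
+{
+	/* Start from the defaults: size, no_cr3, no_vmcs. */
+	pt_asid_init(asid);
+
+	if (!user)
+		return;
+
+	/* A field is copied iff the user's size covers it. */
+	if (user->size >= offsetof(struct pt_asid, vmcs))
+		asid->cr3 = user->cr3;
+
+	if (user->size >= sizeof(*user))
+		asid->vmcs = user->vmcs;
+}
+#endif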
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-block_cache.c b/contrib/processor-trace/libipt/test/src/ptunit-block_cache.c
new file mode 100644
index 0000000000000..8d9b8889b8abf
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-block_cache.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit_threads.h"
+
+#include "pt_block_cache.h"
+
+#include <string.h>
+
+
+/* A test fixture optionally providing a block cache and automatically freeing
+ * the cache.
+ */
+struct bcache_fixture {
+ /* Threading support. */
+ struct ptunit_thrd_fixture thrd;
+
+ /* The cache - it will be freed automatically. */
+ struct pt_block_cache *bcache;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct bcache_fixture *);
+ struct ptunit_result (*fini)(struct bcache_fixture *);
+};
+
+enum {
+ /* The number of entries in fixture-provided caches. */
+ bfix_nentries = 0x10000,
+
+#if defined(FEATURE_THREADS)
+
+ /* The number of additional threads to use for stress testing. */
+ bfix_threads = 3,
+
+#endif /* defined(FEATURE_THREADS) */
+
+ /* The number of iterations in stress testing. */
+ bfix_iterations = 0x10
+};
+
+static struct ptunit_result cfix_init(struct bcache_fixture *bfix)
+{
+ ptu_test(ptunit_thrd_init, &bfix->thrd);
+
+ bfix->bcache = NULL;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bfix_init(struct bcache_fixture *bfix)
+{
+ ptu_test(cfix_init, bfix);
+
+ bfix->bcache = pt_bcache_alloc(bfix_nentries);
+ ptu_ptr(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bfix_fini(struct bcache_fixture *bfix)
+{
+ int thrd;
+
+ ptu_test(ptunit_thrd_fini, &bfix->thrd);
+
+ for (thrd = 0; thrd < bfix->thrd.nthreads; ++thrd)
+ ptu_int_eq(bfix->thrd.result[thrd], 0);
+
+ pt_bcache_free(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_entry_size(void)
+{
+ ptu_uint_eq(sizeof(struct pt_bcache_entry), sizeof(uint32_t));
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_size(void)
+{
+ ptu_uint_le(sizeof(struct pt_block_cache),
+ 2 * sizeof(struct pt_bcache_entry));
+
+ return ptu_passed();
+}
+
+static struct ptunit_result free_null(void)
+{
+ pt_bcache_free(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_null(void)
+{
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ memset(&bce, 0, sizeof(bce));
+
+ errcode = pt_bcache_add(NULL, 0ull, bce);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_null(void)
+{
+ struct pt_bcache_entry bce;
+ struct pt_block_cache bcache;
+ int errcode;
+
+ errcode = pt_bcache_lookup(&bce, NULL, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_bcache_lookup(NULL, &bcache, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(0x10000ull);
+ ptu_ptr(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc_min(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(1ull);
+ ptu_ptr(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc_too_big(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(UINT32_MAX + 1ull);
+ ptu_null(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result alloc_zero(struct bcache_fixture *bfix)
+{
+ bfix->bcache = pt_bcache_alloc(0ull);
+ ptu_null(bfix->bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result initially_empty(struct bcache_fixture *bfix)
+{
+ uint64_t index;
+
+ for (index = 0; index < bfix_nentries; ++index) {
+ struct pt_bcache_entry bce;
+ int status;
+
+ memset(&bce, 0xff, sizeof(bce));
+
+ status = pt_bcache_lookup(&bce, bfix->bcache, index);
+ ptu_int_eq(status, 0);
+
+ status = pt_bce_is_valid(bce);
+ ptu_int_eq(status, 0);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_bad_index(struct bcache_fixture *bfix)
+{
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ memset(&bce, 0, sizeof(bce));
+
+ errcode = pt_bcache_add(bfix->bcache, bfix_nentries, bce);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_bad_index(struct bcache_fixture *bfix)
+{
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ errcode = pt_bcache_lookup(&bce, bfix->bcache, bfix_nentries);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add(struct bcache_fixture *bfix, uint64_t index)
+{
+ struct pt_bcache_entry bce, exp;
+ int errcode;
+
+ memset(&bce, 0xff, sizeof(bce));
+ memset(&exp, 0x00, sizeof(exp));
+
+ exp.ninsn = 1;
+ exp.displacement = 7;
+ exp.mode = ptem_64bit;
+ exp.qualifier = ptbq_decode;
+ exp.isize = 7;
+
+ errcode = pt_bcache_add(bfix->bcache, index, exp);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_bcache_lookup(&bce, bfix->bcache, index);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(bce.ninsn, exp.ninsn);
+ ptu_int_eq(bce.displacement, exp.displacement);
+ ptu_uint_eq(pt_bce_exec_mode(bce), pt_bce_exec_mode(exp));
+ ptu_uint_eq(pt_bce_qualifier(bce), pt_bce_qualifier(exp));
+ ptu_uint_eq(bce.isize, exp.isize);
+
+ return ptu_passed();
+}
+
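+/* A stress-test worker: race to fill the shared cache and verify that
+ * lookups only ever observe either an invalid entry or the one complete
+ * entry that every thread tries to add.
+ */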
+static int worker(void *arg)
+{
+ struct pt_bcache_entry exp;
+ struct pt_block_cache *bcache;
+ uint64_t iter, index;
+
+ bcache = arg;
+ if (!bcache)
+ return -pte_internal;
+
+ memset(&exp, 0x00, sizeof(exp));
+ exp.ninsn = 5;
+ exp.displacement = 28;
+ exp.mode = ptem_64bit;
+ exp.qualifier = ptbq_again;
+ exp.isize = 3;
+
+ for (index = 0; index < bfix_nentries; ++index) {
+ for (iter = 0; iter < bfix_iterations; ++iter) {
+ struct pt_bcache_entry bce;
+ int errcode;
+
+ memset(&bce, 0xff, sizeof(bce));
+
+ errcode = pt_bcache_lookup(&bce, bcache, index);
+ if (errcode < 0)
+ return errcode;
+
+ if (!pt_bce_is_valid(bce)) {
+ errcode = pt_bcache_add(bcache, index, exp);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ errcode = pt_bcache_lookup(&bce, bcache, index);
+ if (errcode < 0)
+ return errcode;
+
+ if (!pt_bce_is_valid(bce))
+ return -pte_nosync;
+
+ if (bce.ninsn != exp.ninsn)
+ return -pte_nosync;
+
+ if (bce.displacement != exp.displacement)
+ return -pte_nosync;
+
+ if (pt_bce_exec_mode(bce) != pt_bce_exec_mode(exp))
+ return -pte_nosync;
+
+ if (pt_bce_qualifier(bce) != pt_bce_qualifier(exp))
+ return -pte_nosync;
+
+ if (bce.isize != exp.isize)
+ return -pte_nosync;
+ }
+ }
+
+ return 0;
+}
+
+static struct ptunit_result stress(struct bcache_fixture *bfix)
+{
+ int errcode;
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd;
+
+ for (thrd = 0; thrd < bfix_threads; ++thrd)
+ ptu_test(ptunit_thrd_create, &bfix->thrd, worker,
+ bfix->bcache);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ errcode = worker(bfix->bcache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct bcache_fixture bfix, cfix;
+ struct ptunit_suite suite;
+
+ bfix.init = bfix_init;
+ bfix.fini = bfix_fini;
+
+ cfix.init = cfix_init;
+ cfix.fini = bfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, bcache_entry_size);
+ ptu_run(suite, bcache_size);
+
+ ptu_run(suite, free_null);
+ ptu_run(suite, add_null);
+ ptu_run(suite, lookup_null);
+
+ ptu_run_f(suite, alloc, cfix);
+ ptu_run_f(suite, alloc_min, cfix);
+ ptu_run_f(suite, alloc_too_big, cfix);
+ ptu_run_f(suite, alloc_zero, cfix);
+
+ ptu_run_f(suite, initially_empty, bfix);
+
+ ptu_run_f(suite, add_bad_index, bfix);
+ ptu_run_f(suite, lookup_bad_index, bfix);
+
+ ptu_run_fp(suite, add, bfix, 0ull);
+ ptu_run_fp(suite, add, bfix, bfix_nentries - 1ull);
+ ptu_run_f(suite, stress, bfix);
+
+ return ptunit_report(&suite);
+}
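+
+/* An illustrative sketch, not part of the imported sources: the entry-size
+ * assertion above is what makes the stress test sound.  With 4-byte
+ * entries, an add amounts to a single aligned 32-bit store, so concurrent
+ * readers see either the old or the new entry, never a torn mix:
+ */
+#if 0
+static void add_is_one_store(struct pt_block_cache *bcache, uint64_t index,
+			     struct pt_bcache_entry bce)
+{
+	/* Effectively a single 32-bit store into the entry array. */
+	(void) pt_bcache_add(bcache, index, bce);
+}
+#endif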
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-config.c b/contrib/processor-trace/libipt/test/src/ptunit-config.c
new file mode 100644
index 0000000000000..a4332ec112f00
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-config.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_config.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <stddef.h>
+#include <string.h>
+
+
+/* A global fake buffer to pacify static analyzers. */
+static uint8_t buffer[8];
+
+static struct ptunit_result from_user_null(void)
+{
+ struct pt_config config;
+ int errcode;
+
+ errcode = pt_config_from_user(NULL, &config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_config_from_user(&config, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_too_small(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ user.size = sizeof(config.size);
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_bad_buffer(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ pt_config_init(&user);
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ user.begin = buffer;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ user.begin = NULL;
+ user.end = buffer;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ user.begin = &buffer[1];
+ user.end = buffer;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, -pte_bad_config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ user.size = sizeof(user);
+ user.begin = buffer;
+ user.end = &buffer[sizeof(buffer)];
+ user.cpu.vendor = pcv_intel;
+ user.errata.bdm70 = 1;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(config.size, sizeof(config));
+ ptu_ptr_eq(config.begin, buffer);
+ ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]);
+ ptu_int_eq(config.cpu.vendor, pcv_intel);
+ ptu_uint_eq(config.errata.bdm70, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_small(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ memset(&config, 0xcd, sizeof(config));
+
+ user.size = offsetof(struct pt_config, cpu);
+ user.begin = buffer;
+ user.end = &buffer[sizeof(buffer)];
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(config.size, offsetof(struct pt_config, cpu));
+ ptu_ptr_eq(config.begin, buffer);
+ ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]);
+ ptu_int_eq(config.cpu.vendor, pcv_unknown);
+ ptu_uint_eq(config.errata.bdm70, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result from_user_big(void)
+{
+ struct pt_config config, user;
+ int errcode;
+
+ user.size = sizeof(user) + 4;
+ user.begin = buffer;
+ user.end = &buffer[sizeof(buffer)];
+ user.cpu.vendor = pcv_intel;
+ user.errata.bdm70 = 1;
+
+ errcode = pt_config_from_user(&config, &user);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(config.size, sizeof(config));
+ ptu_ptr_eq(config.begin, buffer);
+ ptu_ptr_eq(config.end, &buffer[sizeof(buffer)]);
+ ptu_int_eq(config.cpu.vendor, pcv_intel);
+ ptu_uint_eq(config.errata.bdm70, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result size(void)
+{
+ ptu_uint_eq(sizeof(struct pt_errata), 16 * 4);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_size(void)
+{
+ struct pt_conf_addr_filter conf;
+
+ ptu_uint_eq(sizeof(conf.config), 8);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_none(void)
+{
+ struct pt_config config;
+ uint8_t filter;
+
+ pt_config_init(&config);
+
+ ptu_uint_eq(config.addr_filter.config.addr_cfg, 0ull);
+
+ for (filter = 0; filter < 4; ++filter) {
+ uint32_t addr_cfg;
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, filter);
+
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_0(void)
+{
+ struct pt_config config;
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+ uint8_t filter;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr0_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr0_a = 0xa000ull;
+ config.addr_filter.addr0_b = 0xb000ull;
+
+ ptu_uint_ne(config.addr_filter.config.addr_cfg, 0ull);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 0);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_filter);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, 0);
+ ptu_uint_eq(addr_a, 0xa000ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, 0);
+ ptu_uint_eq(addr_b, 0xb000ull);
+
+ for (filter = 1; filter < 4; ++filter) {
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, filter);
+
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_1_3(void)
+{
+ struct pt_config config;
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0xa000ull;
+ config.addr_filter.addr1_b = 0xb000ull;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x100a000ull;
+ config.addr_filter.addr3_b = 0x100b000ull;
+
+ ptu_uint_ne(config.addr_filter.config.addr_cfg, 0ull);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 0);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 1);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_filter);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, 1);
+ ptu_uint_eq(addr_a, 0xa000ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, 1);
+ ptu_uint_eq(addr_b, 0xb000ull);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 2);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, 3);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_stop);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, 3);
+ ptu_uint_eq(addr_a, 0x100a000ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, 3);
+ ptu_uint_eq(addr_b, 0x100b000ull);
+
+ return ptu_passed();
+}
+
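+/* Out-of-bounds filter indices must read as disabled with zero bounds,
+ * even when the underlying configuration memory is poisoned.
+ */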
+static struct ptunit_result addr_filter_oob(uint8_t filter)
+{
+ struct pt_config config;
+ uint64_t addr_a, addr_b;
+ uint32_t addr_cfg;
+
+ pt_config_init(&config);
+
+ memset(&config.addr_filter, 0xcc, sizeof(config.addr_filter));
+
+ addr_cfg = pt_filter_addr_cfg(&config.addr_filter, filter);
+ ptu_uint_eq(addr_cfg, pt_addr_cfg_disabled);
+
+ addr_a = pt_filter_addr_a(&config.addr_filter, filter);
+ ptu_uint_eq(addr_a, 0ull);
+
+ addr_b = pt_filter_addr_b(&config.addr_filter, filter);
+ ptu_uint_eq(addr_b, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xa000);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xaf00);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb000);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10a000);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af00);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b000);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_out(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xfff);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb001);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x100fff);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b001);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_stop_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xa000);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xaf00);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb000);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10a000);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af00);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b000);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_stop_out(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr1_a = 0xa000;
+ config.addr_filter.addr1_b = 0xb000;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xfff);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0xb001);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x100fff);
+ ptu_int_eq(status, 1);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b001);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_out_stop_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0x100f00;
+ config.addr_filter.addr1_b = 0x10af00;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af01);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10b000);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result addr_filter_ip_in_stop_in(void)
+{
+ struct pt_config config;
+ int status;
+
+ pt_config_init(&config);
+ config.addr_filter.config.ctl.addr1_cfg = pt_addr_cfg_filter;
+ config.addr_filter.addr1_a = 0x100f00;
+ config.addr_filter.addr1_b = 0x10af00;
+ config.addr_filter.config.ctl.addr3_cfg = pt_addr_cfg_stop;
+ config.addr_filter.addr3_a = 0x10a000;
+ config.addr_filter.addr3_b = 0x10b000;
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10af00);
+ ptu_int_eq(status, 0);
+
+ status = pt_filter_addr_check(&config.addr_filter, 0x10a0ff);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, from_user_null);
+ ptu_run(suite, from_user_too_small);
+ ptu_run(suite, from_user_bad_buffer);
+ ptu_run(suite, from_user);
+ ptu_run(suite, from_user_small);
+ ptu_run(suite, from_user_big);
+ ptu_run(suite, size);
+
+ ptu_run(suite, addr_filter_size);
+ ptu_run(suite, addr_filter_none);
+ ptu_run(suite, addr_filter_0);
+ ptu_run(suite, addr_filter_1_3);
+ ptu_run_p(suite, addr_filter_oob, 255);
+ ptu_run_p(suite, addr_filter_oob, 8);
+
+ ptu_run(suite, addr_filter_ip_in);
+ ptu_run(suite, addr_filter_ip_out);
+ ptu_run(suite, addr_filter_stop_in);
+ ptu_run(suite, addr_filter_stop_out);
+ ptu_run(suite, addr_filter_ip_out_stop_in);
+ ptu_run(suite, addr_filter_ip_in_stop_in);
+
+ return ptunit_report(&suite);
+}
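+
+/* An illustrative sketch, not part of the imported sources: the filter
+ * tests above pin down the rules that pt_filter_addr_check() models.
+ * For a single configured address range they reduce to:
+ */
+#if 0
+static int check_one_range(uint64_t ip, uint64_t addr_a, uint64_t addr_b,
+			   uint32_t addr_cfg)
+{
+	int in_range = (addr_a <= ip) && (ip <= addr_b);
+
+	/* A stop range disables tracing inside [a, b]; a filter range
+	 * enables tracing only inside [a, b]; both bounds are inclusive.
+	 */
+	if (addr_cfg == pt_addr_cfg_stop)
+		return !in_range;
+
+	if (addr_cfg == pt_addr_cfg_filter)
+		return in_range;
+
+	/* A disabled range does not constrain the IP. */
+	return 1;
+}
+#endif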
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-cpp.cpp b/contrib/processor-trace/libipt/test/src/ptunit-cpp.cpp
new file mode 100644
index 0000000000000..dad41362a7a27
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-cpp.cpp
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "intel-pt.h"
+
+
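+/* These tests merely allocate and free decoders from C++ to verify that
+ * the public C API compiles and links in a C++ translation unit.
+ */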
+static struct ptunit_result init_packet_decoder(void)
+{
+ uint8_t buf[1];
+ struct pt_config config;
+ struct pt_packet_decoder *decoder;
+
+ pt_config_init(&config);
+ config.begin = buf;
+ config.end = buf + sizeof(buf);
+
+ decoder = pt_pkt_alloc_decoder(&config);
+ ptu_ptr(decoder);
+ pt_pkt_free_decoder(decoder);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_query_decoder(void)
+{
+ uint8_t buf[1];
+ struct pt_config config;
+ struct pt_query_decoder *query_decoder;
+
+ pt_config_init(&config);
+ config.begin = buf;
+ config.end = buf + sizeof(buf);
+
+ query_decoder = pt_qry_alloc_decoder(&config);
+ ptu_ptr(query_decoder);
+ pt_qry_free_decoder(query_decoder);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init_packet_decoder);
+ ptu_run(suite, init_query_decoder);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-cpu.c b/contrib/processor-trace/libipt/test/src/ptunit-cpu.c
new file mode 100644
index 0000000000000..a82e0f51cea03
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-cpu.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_cpu.h"
+#include "pt_cpuid.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+
+
+void pt_cpuid(uint32_t leaf, uint32_t *eax, uint32_t *ebx, uint32_t *ecx,
+ uint32_t *edx)
+{
+ (void) leaf;
+ (void) eax;
+ (void) ebx;
+ (void) ecx;
+ (void) edx;
+}
+
+
+static struct ptunit_result cpu_valid(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, "6/44/2");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 6);
+ ptu_uint_eq(cpu.model, 44);
+ ptu_uint_eq(cpu.stepping, 2);
+
+ error = pt_cpu_parse(&cpu, "0xf/0x2c/0xf");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 0xf);
+ ptu_uint_eq(cpu.model, 0x2c);
+ ptu_uint_eq(cpu.stepping, 0xf);
+
+ error = pt_cpu_parse(&cpu, "022/054/017");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 022);
+ ptu_uint_eq(cpu.model, 054);
+ ptu_uint_eq(cpu.stepping, 017);
+
+ error = pt_cpu_parse(&cpu, "6/44");
+ ptu_int_eq(error, 0);
+ ptu_int_eq(cpu.vendor, pcv_intel);
+ ptu_uint_eq(cpu.family, 6);
+ ptu_uint_eq(cpu.model, 44);
+ ptu_uint_eq(cpu.stepping, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cpu_null(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, NULL);
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(NULL, "");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(NULL, NULL);
+ ptu_int_eq(error, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cpu_incomplete(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, "");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6//2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "//");
+ ptu_int_eq(error, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cpu_invalid(void)
+{
+ struct pt_cpu cpu;
+ int error;
+
+ error = pt_cpu_parse(&cpu, "e/44/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/e/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/44/e");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "65536/44/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/256/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/44/256");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "-1/44/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/-1/2");
+ ptu_int_eq(error, -pte_invalid);
+
+ error = pt_cpu_parse(&cpu, "6/44/-1");
+ ptu_int_eq(error, -pte_invalid);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, cpu_valid);
+ ptu_run(suite, cpu_null);
+ ptu_run(suite, cpu_incomplete);
+ ptu_run(suite, cpu_invalid);
+
+ return ptunit_report(&suite);
+}
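+
+/* An illustrative sketch, not part of the imported sources: per the tests
+ * above, pt_cpu_parse() takes a "family/model[/stepping]" string whose
+ * components may be decimal, octal (leading 0), or hex (leading 0x); a
+ * missing stepping defaults to zero:
+ */
+#if 0
+static int parse_examples(struct pt_cpu *cpu)
+{
+	/* Two equivalent ways of naming family 6, model 44, stepping 2. */
+	if (pt_cpu_parse(cpu, "6/44/2") < 0)
+		return -1;
+
+	if (pt_cpu_parse(cpu, "0x6/0x2c/0x2") < 0)
+		return -1;
+
+	/* Stepping omitted: parsed as 6/44/0. */
+	return pt_cpu_parse(cpu, "6/44");
+}
+#endif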
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-event_queue.c b/contrib/processor-trace/libipt/test/src/ptunit-event_queue.c
new file mode 100644
index 0000000000000..41566e708069d
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-event_queue.c
@@ -0,0 +1,470 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_event_queue.h"
+
+
+/* A test fixture providing an initialized event queue. */
+struct evq_fixture {
+ /* The event queue. */
+ struct pt_event_queue evq;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct evq_fixture *);
+ struct ptunit_result (*fini)(struct evq_fixture *);
+};
+
+
+static struct ptunit_result efix_init(struct evq_fixture *efix)
+{
+ pt_evq_init(&efix->evq);
+
+ return ptu_passed();
+}
+
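+/* Initialize the fixture with one event already pending on each binding. */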
+static struct ptunit_result efix_init_pending(struct evq_fixture *efix)
+{
+ struct pt_event *ev;
+ int evb;
+
+ pt_evq_init(&efix->evq);
+
+ for (evb = 0; evb < evb_max; ++evb) {
+ ev = pt_evq_enqueue(&efix->evq, (enum pt_event_binding) evb);
+ ptu_ptr(ev);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result standalone_null(void)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_standalone(NULL);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result standalone(struct evq_fixture *efix)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_standalone(&efix->evq);
+ ptu_ptr(ev);
+ ptu_uint_eq(ev->ip_suppressed, 0ul);
+ ptu_uint_eq(ev->status_update, 0ul);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result enqueue_null(enum pt_event_binding evb)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_enqueue(NULL, evb);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result dequeue_null(enum pt_event_binding evb)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_dequeue(NULL, evb);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result dequeue_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_dequeue(&efix->evq, evb);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int status;
+
+ status = pt_evq_empty(&efix->evq, evb);
+ ptu_int_gt(status, 0);
+
+ status = pt_evq_pending(&efix->evq, evb);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_pending(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int status;
+
+ status = pt_evq_empty(&efix->evq, evb);
+ ptu_int_eq(status, 0);
+
+ status = pt_evq_pending(&efix->evq, evb);
+ ptu_int_gt(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evq_others_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int other;
+
+ for (other = 0; other < evb_max; ++other) {
+ enum pt_event_binding ob;
+
+ ob = (enum pt_event_binding) other;
+ if (ob != evb)
+ ptu_test(evq_empty, efix, ob);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result enqueue_all_dequeue(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ size_t num)
+{
+ struct pt_event *in[evq_max], *out[evq_max];
+ size_t idx;
+
+ ptu_uint_le(num, evq_max - 2);
+
+ for (idx = 0; idx < num; ++idx) {
+ in[idx] = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in[idx]);
+ }
+
+ ptu_test(evq_pending, efix, evb);
+ ptu_test(evq_others_empty, efix, evb);
+
+ for (idx = 0; idx < num; ++idx) {
+ out[idx] = pt_evq_dequeue(&efix->evq, evb);
+ ptu_ptr_eq(out[idx], in[idx]);
+ }
+
+ ptu_test(evq_empty, efix, evb);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result enqueue_one_dequeue(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ size_t num)
+{
+ size_t idx;
+
+ for (idx = 0; idx < num; ++idx) {
+ struct pt_event *in, *out;
+
+ in = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in);
+
+ out = pt_evq_dequeue(&efix->evq, evb);
+ ptu_ptr_eq(out, in);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overflow(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ size_t num)
+{
+ struct pt_event *in[evq_max], *out[evq_max], *ev;
+ size_t idx;
+
+ ptu_uint_le(num, evq_max - 2);
+
+ for (idx = 0; idx < (evq_max - 2); ++idx) {
+ in[idx] = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in[idx]);
+ }
+
+ for (idx = 0; idx < num; ++idx) {
+ ev = pt_evq_enqueue(&efix->evq, evb);
+ ptu_null(ev);
+ }
+
+ for (idx = 0; idx < num; ++idx) {
+ out[idx] = pt_evq_dequeue(&efix->evq, evb);
+ ptu_ptr_eq(out[idx], in[idx]);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_null(enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_clear(NULL, evb);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear(struct evq_fixture *efix,
+ enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_clear(&efix->evq, evb);
+ ptu_int_eq(errcode, 0);
+
+ ptu_test(evq_empty, efix, evb);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result empty_null(enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_empty(NULL, evb);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pending_null(enum pt_event_binding evb)
+{
+ int errcode;
+
+ errcode = pt_evq_pending(NULL, evb);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null(enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_find(NULL, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_empty(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ struct pt_event *ev;
+
+ ev = pt_evq_find(&efix->evq, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_none_evb(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt)
+{
+ struct pt_event *ev;
+ size_t other;
+
+ for (other = 0; other < evb_max; ++other) {
+ enum pt_event_binding ob;
+
+ ob = (enum pt_event_binding) other;
+ if (ob != evb) {
+ ev = pt_evq_enqueue(&efix->evq, ob);
+ ptu_ptr(ev);
+
+ ev->type = evt;
+ }
+ }
+
+ ev = pt_evq_find(&efix->evq, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
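+/* Enqueue up to @num events on @evb, skipping events of type @evt. */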
+static struct ptunit_result evq_enqueue_other(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt,
+ size_t num)
+{
+ enum pt_event_type ot;
+ struct pt_event *ev;
+ size_t other;
+
+ for (other = 0; other < num; ++other) {
+ ot = (enum pt_event_type) other;
+ if (ot != evt) {
+ ev = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(ev);
+
+ ev->type = ot;
+ }
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_none_evt(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt,
+ size_t num)
+{
+ struct pt_event *ev;
+
+ ptu_test(evq_enqueue_other, efix, evb, evt, num);
+
+ ev = pt_evq_find(&efix->evq, evb, evt);
+ ptu_null(ev);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find(struct evq_fixture *efix,
+ enum pt_event_binding evb,
+ enum pt_event_type evt,
+ size_t before, size_t after)
+{
+ struct pt_event *in, *out;
+
+ ptu_test(evq_enqueue_other, efix, evb, evt, before);
+
+ in = pt_evq_enqueue(&efix->evq, evb);
+ ptu_ptr(in);
+
+ in->type = evt;
+
+ ptu_test(evq_enqueue_other, efix, evb, evt, after);
+
+ out = pt_evq_find(&efix->evq, evb, evt);
+ ptu_ptr_eq(out, in);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct evq_fixture efix, pfix;
+ struct ptunit_suite suite;
+
+ efix.init = efix_init;
+ efix.fini = NULL;
+
+ pfix.init = efix_init_pending;
+ pfix.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, standalone_null);
+ ptu_run_f(suite, standalone, efix);
+
+ ptu_run_p(suite, enqueue_null, evb_psbend);
+ ptu_run_p(suite, enqueue_null, evb_tip);
+ ptu_run_p(suite, enqueue_null, evb_fup);
+
+ ptu_run_p(suite, dequeue_null, evb_psbend);
+ ptu_run_p(suite, dequeue_null, evb_tip);
+ ptu_run_p(suite, dequeue_null, evb_fup);
+
+ ptu_run_fp(suite, dequeue_empty, efix, evb_psbend);
+ ptu_run_fp(suite, dequeue_empty, efix, evb_tip);
+ ptu_run_fp(suite, dequeue_empty, efix, evb_fup);
+
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_psbend, 1);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_psbend, 2);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_tip, 1);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_tip, 3);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_fup, 1);
+ ptu_run_fp(suite, enqueue_all_dequeue, efix, evb_fup, 4);
+
+ ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_psbend, evb_max * 2);
+ ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_tip, evb_max * 2);
+ ptu_run_fp(suite, enqueue_one_dequeue, efix, evb_fup, evb_max * 2);
+
+ ptu_run_fp(suite, overflow, efix, evb_psbend, 1);
+ ptu_run_fp(suite, overflow, efix, evb_tip, 2);
+ ptu_run_fp(suite, overflow, efix, evb_fup, 3);
+
+ ptu_run_p(suite, clear_null, evb_psbend);
+ ptu_run_p(suite, clear_null, evb_tip);
+ ptu_run_p(suite, clear_null, evb_fup);
+
+ ptu_run_fp(suite, clear, efix, evb_psbend);
+ ptu_run_fp(suite, clear, pfix, evb_psbend);
+ ptu_run_fp(suite, clear, efix, evb_tip);
+ ptu_run_fp(suite, clear, pfix, evb_tip);
+ ptu_run_fp(suite, clear, efix, evb_fup);
+ ptu_run_fp(suite, clear, pfix, evb_fup);
+
+ ptu_run_p(suite, empty_null, evb_psbend);
+ ptu_run_p(suite, empty_null, evb_tip);
+ ptu_run_p(suite, empty_null, evb_fup);
+
+ ptu_run_p(suite, pending_null, evb_psbend);
+ ptu_run_p(suite, pending_null, evb_tip);
+ ptu_run_p(suite, pending_null, evb_fup);
+
+ ptu_run_p(suite, find_null, evb_psbend, ptev_enabled);
+ ptu_run_p(suite, find_null, evb_tip, ptev_disabled);
+ ptu_run_p(suite, find_null, evb_fup, ptev_paging);
+
+ ptu_run_fp(suite, find_empty, efix, evb_psbend, ptev_enabled);
+ ptu_run_fp(suite, find_empty, efix, evb_tip, ptev_disabled);
+ ptu_run_fp(suite, find_empty, efix, evb_fup, ptev_paging);
+
+ ptu_run_fp(suite, find_none_evb, efix, evb_psbend, ptev_enabled);
+ ptu_run_fp(suite, find_none_evb, efix, evb_tip, ptev_disabled);
+ ptu_run_fp(suite, find_none_evb, efix, evb_fup, ptev_paging);
+
+ ptu_run_fp(suite, find_none_evt, efix, evb_psbend, ptev_enabled, 3);
+ ptu_run_fp(suite, find_none_evt, efix, evb_tip, ptev_disabled, 4);
+ ptu_run_fp(suite, find_none_evt, efix, evb_fup, ptev_paging, 2);
+
+ ptu_run_fp(suite, find, efix, evb_psbend, ptev_enabled, 0, 3);
+ ptu_run_fp(suite, find, efix, evb_tip, ptev_disabled, 2, 0);
+ ptu_run_fp(suite, find, efix, evb_fup, ptev_paging, 1, 4);
+
+ return ptunit_report(&suite);
+}
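+
+/* An illustrative sketch, not part of the imported sources: the overflow
+ * tests above show that each binding's queue holds at most evq_max - 2
+ * events before pt_evq_enqueue() starts returning NULL, and that events
+ * come back out in FIFO order:
+ */
+#if 0
+static void drain(struct pt_event_queue *evq, enum pt_event_binding evb)
+{
+	struct pt_event *ev;
+
+	/* NULL signals an empty queue for this binding. */
+	while ((ev = pt_evq_dequeue(evq, evb)) != NULL)
+		consume(ev);	/* hypothetical consumer */
+}
+#endif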
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-fetch.c b/contrib/processor-trace/libipt/test/src/ptunit-fetch.c
new file mode 100644
index 0000000000000..64c2455ef36b9
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-fetch.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_decoder_function.h"
+#include "pt_packet_decoder.h"
+#include "pt_query_decoder.h"
+#include "pt_encoder.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+/* A test fixture for decoder function fetch tests. */
+struct fetch_fixture {
+ /* The trace buffer. */
+ uint8_t buffer[1024];
+
+ /* A trace configuration. */
+ struct pt_config config;
+
+ /* A trace encoder. */
+ struct pt_encoder encoder;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct fetch_fixture *);
+ struct ptunit_result (*fini)(struct fetch_fixture *);
+};
+
+static struct ptunit_result ffix_init(struct fetch_fixture *ffix)
+{
+ memset(ffix->buffer, pt_opc_bad, sizeof(ffix->buffer));
+
+ memset(&ffix->config, 0, sizeof(ffix->config));
+ ffix->config.size = sizeof(ffix->config);
+ ffix->config.begin = ffix->buffer;
+ ffix->config.end = ffix->buffer + sizeof(ffix->buffer);
+
+ pt_encoder_init(&ffix->encoder, &ffix->config);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ffix_fini(struct fetch_fixture *ffix)
+{
+ pt_encoder_fini(&ffix->encoder);
+
+ return ptu_passed();
+}
+
+
+static struct ptunit_result fetch_null(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(NULL, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_df_fetch(&dfun, NULL, &ffix->config);
+ ptu_int_eq(errcode, -pte_nosync);
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_empty(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.end, &ffix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_unknown(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ ffix->config.begin[0] = pt_opc_bad;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, &pt_decode_unknown);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_unknown_ext(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ ffix->config.begin[0] = pt_opc_ext;
+ ffix->config.begin[1] = pt_ext_bad;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, &pt_decode_unknown);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_unknown_ext2(struct fetch_fixture *ffix)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ ffix->config.begin[0] = pt_opc_ext;
+ ffix->config.begin[1] = pt_ext_ext2;
+ ffix->config.begin[2] = pt_ext2_bad;
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, &pt_decode_unknown);
+
+ return ptu_passed();
+}
+
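+/* Encode @packet into the fixture's buffer and check that fetching from
+ * the buffer's start resolves to the expected decoder function @df.
+ */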
+static struct ptunit_result fetch_packet(struct fetch_fixture *ffix,
+ const struct pt_packet *packet,
+ const struct pt_decoder_function *df)
+{
+ const struct pt_decoder_function *dfun;
+ int errcode;
+
+ errcode = pt_enc_next(&ffix->encoder, packet);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_df_fetch(&dfun, ffix->config.begin, &ffix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(dfun, df);
+
+ return ptu_passed();
+}
+
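+/* Check the decoder function for an otherwise empty packet of @type. */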
+static struct ptunit_result fetch_type(struct fetch_fixture *ffix,
+ enum pt_packet_type type,
+ const struct pt_decoder_function *dfun)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = type;
+
+ ptu_test(fetch_packet, ffix, &packet, dfun);
+
+ return ptu_passed();
+}
+
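+/* Check the decoder function for a tnt.8 packet carrying a single bit. */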
+static struct ptunit_result fetch_tnt_8(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_tnt_8;
+ packet.payload.tnt.bit_size = 1;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_tnt_8);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_mode_exec(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_exec;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_mode);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_mode_tsx(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_mode;
+ packet.payload.mode.leaf = pt_mol_tsx;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_mode);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fetch_exstop_ip(struct fetch_fixture *ffix)
+{
+ struct pt_packet packet;
+
+ memset(&packet, 0, sizeof(packet));
+ packet.type = ppt_exstop;
+ packet.payload.exstop.ip = 1;
+
+ ptu_test(fetch_packet, ffix, &packet, &pt_decode_exstop);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct fetch_fixture ffix;
+ struct ptunit_suite suite;
+
+ ffix.init = ffix_init;
+ ffix.fini = ffix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, fetch_null, ffix);
+ ptu_run_f(suite, fetch_empty, ffix);
+
+ ptu_run_f(suite, fetch_unknown, ffix);
+ ptu_run_f(suite, fetch_unknown_ext, ffix);
+ ptu_run_f(suite, fetch_unknown_ext2, ffix);
+
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pad, &pt_decode_pad);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_psb, &pt_decode_psb);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tip, &pt_decode_tip);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tnt_64, &pt_decode_tnt_64);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tip_pge, &pt_decode_tip_pge);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tip_pgd, &pt_decode_tip_pgd);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_fup, &pt_decode_fup);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pip, &pt_decode_pip);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_ovf, &pt_decode_ovf);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_psbend, &pt_decode_psbend);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tsc, &pt_decode_tsc);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_cbr, &pt_decode_cbr);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_tma, &pt_decode_tma);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_mtc, &pt_decode_mtc);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_cyc, &pt_decode_cyc);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_stop, &pt_decode_stop);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_vmcs, &pt_decode_vmcs);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_mnt, &pt_decode_mnt);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_exstop, &pt_decode_exstop);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_mwait, &pt_decode_mwait);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pwre, &pt_decode_pwre);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_pwrx, &pt_decode_pwrx);
+ ptu_run_fp(suite, fetch_type, ffix, ppt_ptw, &pt_decode_ptw);
+
+ ptu_run_f(suite, fetch_tnt_8, ffix);
+ ptu_run_f(suite, fetch_mode_exec, ffix);
+ ptu_run_f(suite, fetch_mode_tsx, ffix);
+ ptu_run_f(suite, fetch_exstop_ip, ffix);
+
+ return ptunit_report(&suite);
+}
+
+
+/* Dummy decode functions to satisfy link dependencies.
+ *
+ * As a nice side-effect, we will know if we need to add more tests when
+ * adding new decoder functions.
+ */
+int pt_pkt_decode_unknown(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_unknown(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pad(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pad(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_psb(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psb(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tip(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tnt_8(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_8(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tnt_64(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_64(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tip_pge(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pge(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tip_pgd(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pgd(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_fup(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pip(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_ovf(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ovf(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mode(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_psbend(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psbend(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tsc(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_cbr(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_tma(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tma(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mtc(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mtc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_cyc(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cyc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_stop(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_stop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_vmcs(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mnt(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_exstop(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_exstop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_mwait(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mwait(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pwre(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwre(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_pwrx(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwrx(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+
+int pt_pkt_decode_ptw(struct pt_packet_decoder *d, struct pt_packet *p)
+{
+ (void) d;
+ (void) p;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ptw(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-ild.c b/contrib/processor-trace/libipt/test/src/ptunit-ild.c
new file mode 100644
index 0000000000000..88d3331ceb6f9
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-ild.c
@@ -0,0 +1,759 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_ild.h"
+
+#include <string.h>
+
+
+/* Check that an instruction is decoded correctly. */
+static struct ptunit_result ptunit_ild_decode(uint8_t *raw, uint8_t size,
+ enum pt_exec_mode mode)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ memcpy(insn.raw, raw, size);
+ insn.size = size;
+ insn.mode = mode;
+
+ errcode = pt_ild_decode(&insn, &iext);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(insn.size, size);
+ ptu_int_eq(insn.iclass, ptic_other);
+ ptu_int_eq(iext.iclass, PTI_INST_INVALID);
+
+ return ptu_passed();
+}
+
+/* Check that an instruction is decoded and classified correctly. */
+static struct ptunit_result ptunit_ild_classify(uint8_t *raw, uint8_t size,
+ enum pt_exec_mode mode,
+ pti_inst_enum_t iclass)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ memcpy(insn.raw, raw, size);
+ insn.size = size;
+ insn.mode = mode;
+
+ errcode = pt_ild_decode(&insn, &iext);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(insn.size, size);
+ ptu_int_eq(iext.iclass, iclass);
+
+ return ptu_passed();
+}
+
+/* Check that an invalid instruction is detected correctly.
+ *
+ * Note that we intentionally do not detect all invalid instructions. This test
+ * therefore only covers some that we care about.
+ */
+static struct ptunit_result ptunit_ild_invalid(uint8_t *raw, uint8_t size,
+ enum pt_exec_mode mode)
+{
+ struct pt_insn_ext iext;
+ struct pt_insn insn;
+ int errcode;
+
+ memset(&iext, 0, sizeof(iext));
+ memset(&insn, 0, sizeof(insn));
+
+ memcpy(insn.raw, raw, size);
+ insn.size = size;
+ insn.mode = mode;
+
+ errcode = pt_ild_decode(&insn, &iext);
+ ptu_int_eq(errcode, -pte_bad_insn);
+
+ return ptu_passed();
+}
+
+
+/* Macros to automatically update the test location. */
+#define ptu_decode(insn, size, mode) \
+ ptu_check(ptunit_ild_decode, insn, size, mode)
+
+#define ptu_classify(insn, size, mode, iclass) \
+ ptu_check(ptunit_ild_classify, insn, size, mode, iclass)
+
+/* Macros to also automatically supply the instruction size. */
+#define ptu_decode_s(insn, mode) \
+ ptu_decode(insn, sizeof(insn), mode)
+
+#define ptu_classify_s(insn, mode, iclass) \
+ ptu_classify(insn, sizeof(insn), mode, iclass)
+
+#define ptu_invalid_s(insn, mode) \
+ ptu_check(ptunit_ild_invalid, insn, sizeof(insn), mode)
+
+
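+/* Decode a push imm32 in 64-bit mode. */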
+static struct ptunit_result push(void)
+{
+ uint8_t insn[] = { 0x68, 0x11, 0x22, 0x33, 0x44 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_rel(void)
+{
+ uint8_t insn[] = { 0xE9, 0x60, 0xF9, 0xFF, 0xFF };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_JMP_E9);
+
+ return ptu_passed();
+}
+
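+/* Decode a maximum-size nop built from redundant prefixes. */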
+static struct ptunit_result long_nop(void)
+{
+ uint8_t insn[] = { 0x66, 0x66, 0x66, 0x66,
+			   0x66, 0x66, 0x2e, 0x0f,
+			   0x1f, 0x84, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
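+/* Decode mov al, moffs with varying address sizes. */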
+static struct ptunit_result mov_al_64(void)
+{
+ uint8_t insn[] = { 0x48, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
+ 0xff, 0x11 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_32_em64(void)
+{
+ uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee,
+			   0xff, 0x11 };
+
+ ptu_decode(insn, 6, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_32(void)
+{
+ uint8_t insn[] = { 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 5, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_32_em16(void)
+{
+ uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 6, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_16_em32(void)
+{
+ uint8_t insn[] = { 0x67, 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 4, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_al_16(void)
+{
+ uint8_t insn[] = { 0xa0, 0x3f, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
+
+ ptu_decode(insn, 3, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result rdtsc(void)
+{
+ uint8_t insn[] = { 0x0f, 0x31 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pcmpistri(void)
+{
+ uint8_t insn[] = { 0x66, 0x0f, 0x3a, 0x63, 0x04, 0x16, 0x1a };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmovdqa(void)
+{
+ uint8_t insn[] = { 0xc5, 0xf9, 0x6f, 0x25, 0xa9, 0x55, 0x04, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vpandn(void)
+{
+ uint8_t insn[] = { 0xc4, 0x41, 0x29, 0xdf, 0xd1 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result syscall(void)
+{
+ uint8_t insn[] = { 0x0f, 0x05 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSCALL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sysret(void)
+{
+ uint8_t insn[] = { 0x0f, 0x07 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSRET);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sysenter(void)
+{
+ uint8_t insn[] = { 0x0f, 0x34 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSENTER);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sysexit(void)
+{
+ uint8_t insn[] = { 0x0f, 0x35 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_SYSEXIT);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result int3(void)
+{
+ uint8_t insn[] = { 0xcc };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_INT3);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result intn(void)
+{
+ uint8_t insn[] = { 0xcd, 0x06 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_INT);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result iret(void)
+{
+ uint8_t insn[] = { 0xcf };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_IRET);
+
+ return ptu_passed();
+}
+
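+/* Classify far calls, jumps, and returns. */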
+static struct ptunit_result call_9a_cd(void)
+{
+ uint8_t insn[] = { 0x9a, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_CALL_9A);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result call_9a_cp(void)
+{
+ uint8_t insn[] = { 0x9a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_CALL_9A);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result call_ff_3(void)
+{
+ uint8_t insn[] = { 0xff, 0x1c, 0x25, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_CALL_FFr3);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_ff_5(void)
+{
+ uint8_t insn[] = { 0xff, 0x2c, 0x25, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_JMP_FFr5);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_ea_cd(void)
+{
+ uint8_t insn[] = { 0xea, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_JMP_EA);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jmp_ea_cp(void)
+{
+ uint8_t insn[] = { 0xea, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_JMP_EA);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ret_ca(void)
+{
+ uint8_t insn[] = { 0xca, 0x00, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_RET_CA);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmlaunch(void)
+{
+ uint8_t insn[] = { 0x0f, 0x01, 0xc2 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMLAUNCH);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmresume(void)
+{
+ uint8_t insn[] = { 0x0f, 0x01, 0xc3 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMRESUME);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmcall(void)
+{
+ uint8_t insn[] = { 0x0f, 0x01, 0xc1 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMCALL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmptrld(void)
+{
+ uint8_t insn[] = { 0x0f, 0xc7, 0x30 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_VMPTRLD);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result jrcxz(void)
+{
+ uint8_t insn[] = { 0xe3, 0x00 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_JrCXZ);
+
+ return ptu_passed();
+}
+
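+/* Decode mov [er]ax, moffs with and without address-size prefixes. */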
+static struct ptunit_result mov_eax_moffs64(void)
+{
+ uint8_t insn[] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_eax_moffs64_32(void)
+{
+ uint8_t insn[] = { 0x67, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_rax_moffs64(void)
+{
+ uint8_t insn[] = { 0x48, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_rax_moffs64_32(void)
+{
+ uint8_t insn[] = { 0x67, 0x48, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs64(void)
+{
+ uint8_t insn[] = { 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs64_32(void)
+{
+ uint8_t insn[] = { 0x67, 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_eax_moffs32(void)
+{
+ uint8_t insn[] = { 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs32(void)
+{
+ uint8_t insn[] = { 0x66, 0xa1, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mov_ax_moffs16(void)
+{
+ uint8_t insn[] = { 0xa1, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
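+/* Outside 64-bit mode, 0xc4 and 0xc5 decode as les/lds rather than as
+ * vex prefixes.
+ */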
+static struct ptunit_result les(void)
+{
+ uint8_t insn[] = { 0xc4, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_disp16(void)
+{
+ uint8_t insn[] = { 0xc4, 0x06, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_disp32(void)
+{
+ uint8_t insn[] = { 0xc4, 0x05, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_ind_disp8(void)
+{
+ uint8_t insn[] = { 0xc4, 0x40, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_ind_disp16(void)
+{
+ uint8_t insn[] = { 0xc4, 0x80, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result les_ind_disp32(void)
+{
+ uint8_t insn[] = { 0xc4, 0x80, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds(void)
+{
+ uint8_t insn[] = { 0xc5, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_disp16(void)
+{
+ uint8_t insn[] = { 0xc5, 0x06, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_disp32(void)
+{
+ uint8_t insn[] = { 0xc5, 0x05, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_ind_disp8(void)
+{
+ uint8_t insn[] = { 0xc5, 0x40, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_ind_disp16(void)
+{
+ uint8_t insn[] = { 0xc5, 0x80, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lds_ind_disp32(void)
+{
+ uint8_t insn[] = { 0xc5, 0x80, 0x00, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_32bit);
+
+ return ptu_passed();
+}
+
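+/* In 64-bit mode, 0x62 starts an evex-encoded instruction; in other
+ * modes, it decodes as bound.
+ */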
+static struct ptunit_result vpshufb(void)
+{
+ uint8_t insn[] = { 0x62, 0x02, 0x05, 0x00, 0x00, 0x00 };
+
+ ptu_decode_s(insn, ptem_64bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bound(void)
+{
+ uint8_t insn[] = { 0x62, 0x02 };
+
+ ptu_decode_s(insn, ptem_32bit);
+ ptu_decode_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result evex_cutoff(void)
+{
+ uint8_t insn[] = { 0x62 };
+
+ ptu_invalid_s(insn, ptem_64bit);
+ ptu_invalid_s(insn, ptem_32bit);
+ ptu_invalid_s(insn, ptem_16bit);
+
+ return ptu_passed();
+}
+
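+/* Classify ptwrite with register and memory operands. */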
+static struct ptunit_result ptwrite_r32(void)
+{
+ uint8_t insn[] = { 0xf3, 0x0f, 0xae, 0xe7 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_m32(void)
+{
+ uint8_t insn[] = { 0xf3, 0x0f, 0xae, 0x67, 0xcc };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_32bit, PTI_INST_PTWRITE);
+ ptu_classify_s(insn, ptem_16bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_r64(void)
+{
+ uint8_t insn[] = { 0xf3, 0x48, 0x0f, 0xae, 0xe7 };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptwrite_m64(void)
+{
+ uint8_t insn[] = { 0xf3, 0x48, 0x0f, 0xae, 0x67, 0xcc };
+
+ ptu_classify_s(insn, ptem_64bit, PTI_INST_PTWRITE);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ pt_ild_init();
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, push);
+ ptu_run(suite, jmp_rel);
+ ptu_run(suite, long_nop);
+ ptu_run(suite, mov_al_64);
+ ptu_run(suite, mov_al_32);
+ ptu_run(suite, mov_al_32_em64);
+ ptu_run(suite, mov_al_32_em16);
+ ptu_run(suite, mov_al_16_em32);
+ ptu_run(suite, mov_al_16);
+ ptu_run(suite, rdtsc);
+ ptu_run(suite, pcmpistri);
+ ptu_run(suite, vmovdqa);
+ ptu_run(suite, vpandn);
+ ptu_run(suite, syscall);
+ ptu_run(suite, sysret);
+ ptu_run(suite, sysenter);
+ ptu_run(suite, sysexit);
+ ptu_run(suite, int3);
+ ptu_run(suite, intn);
+ ptu_run(suite, iret);
+ ptu_run(suite, call_9a_cd);
+ ptu_run(suite, call_9a_cp);
+ ptu_run(suite, call_ff_3);
+ ptu_run(suite, jmp_ff_5);
+ ptu_run(suite, jmp_ea_cd);
+ ptu_run(suite, jmp_ea_cp);
+ ptu_run(suite, ret_ca);
+ ptu_run(suite, vmlaunch);
+ ptu_run(suite, vmresume);
+ ptu_run(suite, vmcall);
+ ptu_run(suite, vmptrld);
+ ptu_run(suite, jrcxz);
+ ptu_run(suite, mov_eax_moffs64);
+ ptu_run(suite, mov_eax_moffs64_32);
+ ptu_run(suite, mov_rax_moffs64);
+ ptu_run(suite, mov_rax_moffs64_32);
+ ptu_run(suite, mov_ax_moffs64);
+ ptu_run(suite, mov_ax_moffs64_32);
+ ptu_run(suite, mov_eax_moffs32);
+ ptu_run(suite, mov_ax_moffs32);
+ ptu_run(suite, mov_ax_moffs16);
+ ptu_run(suite, les);
+ ptu_run(suite, les_disp16);
+ ptu_run(suite, les_disp32);
+ ptu_run(suite, les_ind_disp8);
+ ptu_run(suite, les_ind_disp16);
+ ptu_run(suite, les_ind_disp32);
+ ptu_run(suite, lds);
+ ptu_run(suite, lds_disp16);
+ ptu_run(suite, lds_disp32);
+ ptu_run(suite, lds_ind_disp8);
+ ptu_run(suite, lds_ind_disp16);
+ ptu_run(suite, lds_ind_disp32);
+ ptu_run(suite, vpshufb);
+ ptu_run(suite, bound);
+ ptu_run(suite, evex_cutoff);
+ ptu_run(suite, ptwrite_r32);
+ ptu_run(suite, ptwrite_m32);
+ ptu_run(suite, ptwrite_r64);
+ ptu_run(suite, ptwrite_m64);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-image.c b/contrib/processor-trace/libipt/test/src/ptunit-image.c
new file mode 100644
index 0000000000000..f635dc366b27b
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-image.c
@@ -0,0 +1,2286 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_image.h"
+#include "pt_section.h"
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+
+struct image_fixture;
+
+/* A test mapping. */
+struct ifix_mapping {
+ /* The contents. */
+ uint8_t content[0x10];
+
+ /* The size - between 0 and sizeof(content). */
+ uint64_t size;
+
+ /* An artificial error code to be injected into pt_section_read().
+ *
+ * If @errcode is non-zero, pt_section_read() fails with @errcode.
+ */
+ int errcode;
+};
+
+/* A test file status - turned into a section status. */
+struct ifix_status {
+ /* Delete indication:
+ * - zero if initialized and not (yet) deleted
+ * - non-zero if deleted and not (re-)initialized
+ */
+ int deleted;
+
+ /* Put with use-count of zero indication. */
+ int bad_put;
+
+ /* The test mapping to be used. */
+ struct ifix_mapping *mapping;
+
+ /* A link back to the test fixture providing this section. */
+ struct image_fixture *ifix;
+};
+
+enum {
+ ifix_nsecs = 5
+};
+
+/* A fake image section cache. */
+struct pt_image_section_cache {
+ /* The cached sections. */
+ struct pt_section *section[ifix_nsecs];
+
+ /* Their load addresses. */
+ uint64_t laddr[ifix_nsecs];
+
+ /* The number of used sections. */
+ int nsecs;
+};
+
+extern int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr,
+ int isid);
+
+
+/* A test fixture providing an image, test sections, and asids. */
+struct image_fixture {
+ /* The image. */
+ struct pt_image image;
+
+ /* The test states. */
+ struct ifix_status status[ifix_nsecs];
+
+ /* The test mappings. */
+ struct ifix_mapping mapping[ifix_nsecs];
+
+ /* The sections. */
+ struct pt_section section[ifix_nsecs];
+
+ /* The asids. */
+ struct pt_asid asid[3];
+
+ /* The number of used sections/mappings/states. */
+ int nsecs;
+
+ /* An initially empty image as destination for image copies. */
+ struct pt_image copy;
+
+ /* A test section cache. */
+ struct pt_image_section_cache iscache;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct image_fixture *);
+ struct ptunit_result (*fini)(struct image_fixture *);
+};
+
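+/* Initialize @section to use @status and @mapping and fill the
+ * mapping's content with increasing byte values.
+ */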
+static void ifix_init_section(struct pt_section *section, char *filename,
+ struct ifix_status *status,
+ struct ifix_mapping *mapping,
+ struct image_fixture *ifix)
+{
+ uint8_t i;
+
+ memset(section, 0, sizeof(*section));
+
+ section->filename = filename;
+ section->status = status;
+ section->size = mapping->size = sizeof(mapping->content);
+ section->offset = 0x10;
+
+ for (i = 0; i < mapping->size; ++i)
+ mapping->content[i] = i;
+
+ status->deleted = 0;
+ status->bad_put = 0;
+ status->mapping = mapping;
+ status->ifix = ifix;
+}
+
+static int ifix_add_section(struct image_fixture *ifix, char *filename)
+{
+ int index;
+
+ if (!ifix)
+ return -pte_internal;
+
+ index = ifix->nsecs;
+ if (ifix_nsecs <= index)
+ return -pte_internal;
+
+ ifix_init_section(&ifix->section[index], filename, &ifix->status[index],
+ &ifix->mapping[index], ifix);
+
+ ifix->nsecs += 1;
+ return index;
+}
+
+static int ifix_cache_section(struct image_fixture *ifix,
+ struct pt_section *section, uint64_t laddr)
+{
+ int index;
+
+ if (!ifix)
+ return -pte_internal;
+
+ index = ifix->iscache.nsecs;
+ if (ifix_nsecs <= index)
+ return -pte_internal;
+
+ ifix->iscache.section[index] = section;
+ ifix->iscache.laddr[index] = laddr;
+
+ index += 1;
+ ifix->iscache.nsecs = index;
+
+ return index;
+}
+
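+/* Test stubs replacing the section API; they operate on the fixture's
+ * mappings.
+ */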
+const char *pt_section_filename(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->filename;
+}
+
+uint64_t pt_section_offset(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->offset;
+}
+
+uint64_t pt_section_size(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->size;
+}
+
+struct pt_section *pt_mk_section(const char *file, uint64_t offset,
+ uint64_t size)
+{
+ (void) file;
+ (void) offset;
+ (void) size;
+
+ /* This function is not used by our tests. */
+ return NULL;
+}
+
+int pt_section_get(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ section->ucount += 1;
+ return 0;
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ struct ifix_status *status;
+ uint16_t ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount) {
+ status->bad_put += 1;
+
+ return -pte_internal;
+ }
+
+ ucount = --section->ucount;
+ if (!ucount) {
+ status->deleted += 1;
+
+ if (status->deleted > 1)
+ return -pte_internal;
+ }
+
+ return 0;
+}
+
+int pt_iscache_lookup(struct pt_image_section_cache *iscache,
+ struct pt_section **section, uint64_t *laddr, int isid)
+{
+ if (!iscache || !section || !laddr)
+ return -pte_internal;
+
+ if (!isid || iscache->nsecs < isid)
+ return -pte_bad_image;
+
+ isid -= 1;
+
+ *section = iscache->section[isid];
+ *laddr = iscache->laddr[isid];
+
+ return pt_section_get(*section);
+}
+
+static int ifix_unmap(struct pt_section *section)
+{
+ uint16_t mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount;
+ if (!mcount)
+ return -pte_internal;
+
+ if (!section->mapping)
+ return -pte_internal;
+
+ mcount = --section->mcount;
+ if (!mcount)
+ section->mapping = NULL;
+
+ return 0;
+}
+
+static int ifix_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ struct ifix_mapping *mapping;
+ uint64_t begin, end;
+
+ if (!section || !buffer)
+ return -pte_internal;
+
+ begin = offset;
+ end = begin + size;
+
+ if (end < begin)
+ return -pte_nomap;
+
+ mapping = section->mapping;
+ if (!mapping)
+ return -pte_nomap;
+
+ if (mapping->errcode)
+ return mapping->errcode;
+
+ if (mapping->size <= begin)
+ return -pte_nomap;
+
+ if (mapping->size < end) {
+ end = mapping->size;
+ size = (uint16_t) (end - begin);
+ }
+
+ memcpy(buffer, &mapping->content[begin], size);
+
+ return size;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ struct ifix_status *status;
+ uint16_t mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ mcount = section->mcount++;
+ if (mcount)
+ return 0;
+
+ if (section->mapping)
+ return -pte_internal;
+
+ status = section->status;
+ if (!status)
+ return -pte_internal;
+
+ section->mapping = status->mapping;
+ section->unmap = ifix_unmap;
+ section->read = ifix_read;
+
+ return 0;
+}
+
+int pt_section_on_map_lock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+ if (!section->unmap)
+ return -pte_nomap;
+
+ return section->unmap(section);
+}
+
+int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ if (!section)
+ return -pte_internal;
+
+ if (!section->read)
+ return -pte_nomap;
+
+ return section->read(section, buffer, size, offset);
+}
+
+/* A test read memory callback. */
+static int image_readmem_callback(uint8_t *buffer, size_t size,
+ const struct pt_asid *asid,
+ uint64_t ip, void *context)
+{
+ const uint8_t *memory;
+ size_t idx;
+
+ (void) asid;
+
+ if (!buffer)
+ return -pte_invalid;
+
+ /* We use a constant offset of 0x3000. */
+ if (ip < 0x3000ull)
+ return -pte_nomap;
+
+ ip -= 0x3000ull;
+
+ memory = (const uint8_t *) context;
+ if (!memory)
+ return -pte_internal;
+
+ for (idx = 0; idx < size; ++idx)
+ buffer[idx] = memory[ip + idx];
+
+ return (int) idx;
+}
+
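+/* Check that initializing an image clears its fields. */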
+static struct ptunit_result init(void)
+{
+ struct pt_image image;
+
+ memset(&image, 0xcd, sizeof(image));
+
+ pt_image_init(&image, NULL);
+ ptu_null(image.name);
+ ptu_null(image.sections);
+ ptu_null((void *) (uintptr_t) image.readmem.callback);
+ ptu_null(image.readmem.context);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_name(struct image_fixture *ifix)
+{
+ memset(&ifix->image, 0xcd, sizeof(ifix->image));
+
+ pt_image_init(&ifix->image, "image-name");
+ ptu_str_eq(ifix->image.name, "image-name");
+ ptu_null(ifix->image.sections);
+ ptu_null((void *) (uintptr_t) ifix->image.readmem.callback);
+ ptu_null(ifix->image.readmem.context);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_image_init(NULL, NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini(void)
+{
+ struct ifix_mapping mapping;
+ struct ifix_status status;
+ struct pt_section section;
+ struct pt_image image;
+ struct pt_asid asid;
+ int errcode;
+
+ pt_asid_init(&asid);
+ ifix_init_section(&section, NULL, &status, &mapping, NULL);
+
+ pt_image_init(&image, NULL);
+ errcode = pt_image_add(&image, &section, &asid, 0x0ull, 0);
+ ptu_int_eq(errcode, 0);
+
+ pt_image_fini(&image);
+ ptu_int_eq(section.ucount, 0);
+ ptu_int_eq(section.mcount, 0);
+ ptu_int_eq(status.deleted, 1);
+ ptu_int_eq(status.bad_put, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_empty(void)
+{
+ struct pt_image image;
+
+ pt_image_init(&image, NULL);
+ pt_image_fini(&image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_null(void)
+{
+ pt_image_fini(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name(struct image_fixture *ifix)
+{
+ const char *name;
+
+ pt_image_init(&ifix->image, "image-name");
+
+ name = pt_image_name(&ifix->image);
+ ptu_str_eq(name, "image-name");
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_none(void)
+{
+ struct pt_image image;
+ const char *name;
+
+ pt_image_init(&image, NULL);
+
+ name = pt_image_name(&image);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_null(void)
+{
+ const char *name;
+
+ name = pt_image_name(NULL);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
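+/* Check that reading from an empty image fails with -pte_nomap. */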
+static struct ptunit_result read_empty(struct image_fixture *ifix)
+{
+ struct pt_asid asid;
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ pt_asid_init(&asid);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &asid, 0x1000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
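+/* Check that sections added later shadow overlapping earlier sections. */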
+static struct ptunit_result overlap_front(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1001ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1010ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ buffer[0] = 0xcc;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_back(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1001ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1010ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_multiple(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1010ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1008ull, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1007ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x07);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1017ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1018ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overlap_mid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ ifix->section[1].size = 0x8;
+ ifix->mapping[1].size = 0x8;
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1004ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1004ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x100bull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x07);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x100cull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x0c);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result contained(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x8;
+ ifix->mapping[0].size = 0x8;
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result contained_multiple(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x2;
+ ifix->mapping[0].size = 0x2;
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1008ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1004ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x04);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result contained_back(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x8;
+ ifix->mapping[0].size = 0x8;
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x100cull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1004ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x04);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x100cull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0c);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1010ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x04);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result same(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result same_different_isid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1008ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x08);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result same_different_offset(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc }, i;
+ int status, isid, index;
+
+ /* Add another section from a different part of the same file as an
+ * existing section.
+ */
+ index = ifix_add_section(ifix, ifix->section[0].filename);
+ ptu_int_gt(index, 0);
+
+ ifix->section[index].offset = ifix->section[0].offset + 0x10;
+ ptu_uint_eq(ifix->section[index].size, ifix->section[0].size);
+
+ /* Change the content of the new section so we can distinguish them. */
+ for (i = 0; i < ifix->mapping[index].size; ++i)
+ ifix->mapping[index].content[i] += 0x10;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 0);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[index],
+ &ifix->asid[0], 0x1000ull, 0);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 0);
+ ptu_uint_eq(buffer[0], 0x10);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 0);
+ ptu_uint_eq(buffer[0], 0x1f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result adjacent(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull - ifix->section[1].size, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0],
+ 0x1000ull + ifix->section[0].size, 3);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0xfffull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0],
+ ifix->mapping[1].content[ifix->mapping[1].size - 1]);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull + ifix->section[0].size);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
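+/* Check that reading with NULL arguments fails. */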
+static struct ptunit_result read_null(struct image_fixture *ifix)
+{
+ uint8_t buffer;
+ int status, isid;
+
+ status = pt_image_read(NULL, &isid, &buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_read(&ifix->image, NULL, &buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_read(&ifix->image, &isid, NULL, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_read(&ifix->image, &isid, &buffer, 1, NULL,
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[1],
+ 0x1008ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1009ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x09);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[1],
+ 0x1009ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x2003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, NULL, 0x2003ull);
+ ptu_int_eq(status, -pte_internal);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_callback(struct image_fixture *ifix)
+{
+ uint8_t memory[] = { 0xdd, 0x01, 0x02, 0xdd };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_set_callback(&ifix->image, image_readmem_callback,
+ memory);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x3001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 0);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomem(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[1], 0x1010ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_truncated(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x100full);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x0f);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
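+/* Check that section read errors are passed on to the caller. */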
+static struct ptunit_result read_error(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc };
+ int status, isid;
+
+ ifix->mapping[0].errcode = -pte_nosync;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_nosync);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_spurious_error(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ ifix->mapping[0].errcode = -pte_nosync;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 1, &ifix->asid[0],
+ 0x1005ull);
+ ptu_int_eq(status, -pte_nosync);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x00);
+
+ return ptu_passed();
+}
+
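+/* Check that removing a section deletes it and unmaps its addresses. */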
+static struct ptunit_result remove_section(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove(&ifix->image, &ifix->section[0],
+ &ifix->asid[0], 0x1000ull);
+ ptu_int_eq(status, 0);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_bad_vaddr(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove(&ifix->image, &ifix->section[0],
+ &ifix->asid[0], 0x2000ull);
+ ptu_int_eq(status, -pte_bad_image);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2005ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x05);
+ ptu_uint_eq(buffer[1], 0x06);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_bad_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove(&ifix->image, &ifix->section[0],
+ &ifix->asid[1], 0x1000ull);
+ ptu_int_eq(status, -pte_bad_image);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2005ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x05);
+ ptu_uint_eq(buffer[1], 0x06);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_by_filename(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_filename(&ifix->image,
+ ifix->section[0].filename,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 1);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+remove_by_filename_bad_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_filename(&ifix->image,
+ ifix->section[0].filename,
+ &ifix->asid[1]);
+ ptu_int_eq(status, 0);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2005ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x05);
+ ptu_uint_eq(buffer[1], 0x06);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_none_by_filename(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_remove_by_filename(&ifix->image, "bad-name",
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_int_eq(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
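+/* pt_image_remove_by_filename() removes all sections with a matching
+ * filename in the given address space and returns their number.
+ */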
+static struct ptunit_result remove_all_by_filename(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].filename = "same-name";
+ ifix->section[1].filename = "same-name";
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x2000ull, 2);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_filename(&ifix->image, "same-name",
+ &ifix->asid[0]);
+ ptu_int_eq(status, 2);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_ne(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x2003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result remove_by_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1001ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 10);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_image_remove_by_asid(&ifix->image, &ifix->asid[0]);
+ ptu_int_eq(status, 1);
+
+ ptu_int_ne(ifix->status[0].deleted, 0);
+ ptu_int_eq(ifix->status[1].deleted, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, sizeof(buffer),
+ &ifix->asid[0], 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0x02);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_empty(struct image_fixture *ifix)
+{
+ struct pt_asid asid;
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ pt_asid_init(&asid);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, sizeof(buffer),
+ &asid, 0x1000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_self(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_copy(&ifix->image, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_shrink(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[1],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[1],
+ 0x2003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 11);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
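+/* Sections copied from the source image split overlapping sections
+ * that already exist in the destination image.
+ */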
+static struct ptunit_result copy_split(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ ifix->section[1].size = 0x7;
+ ifix->mapping[1].size = 0x7;
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x2001ull, 2);
+ ptu_int_eq(status, 0);
+
+ ifix->section[2].size = 0x8;
+ ifix->mapping[2].size = 0x8;
+
+ status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0],
+ 0x2008ull, 3);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x02);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2009ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x01);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2000ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x00);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
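+/* A copied section replaces existing destination sections that it
+ * covers entirely.
+ */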
+static struct ptunit_result copy_merge(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[1].size = 0x8;
+ ifix->mapping[1].size = 0x8;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[0],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ ifix->section[2].size = 0x8;
+ ifix->mapping[2].size = 0x8;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[2], &ifix->asid[0],
+ 0x2008ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x2000ull, 3);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x200aull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0a);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
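+/* A copied section truncates existing destination sections where they
+ * overlap.
+ */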
+static struct ptunit_result copy_overlap(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0],
+ 0x2000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->copy, &ifix->section[1], &ifix->asid[0],
+ 0x2010ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[2], &ifix->asid[0],
+ 0x2008ull, 3);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2003ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 1);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x200aull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x02);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2016ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 3);
+ ptu_uint_eq(buffer[0], 0x0e);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 1, &ifix->asid[0],
+ 0x2019ull);
+ ptu_int_eq(status, 1);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x09);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result copy_replace(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ ifix->section[0].size = 0x8;
+ ifix->mapping[0].size = 0x8;
+
+ status = pt_image_add(&ifix->copy, &ifix->section[0], &ifix->asid[0],
+ 0x1004ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[0],
+ 0x1000ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_copy(&ifix->copy, &ifix->image);
+ ptu_int_eq(status, 0);
+
+ isid = -1;
+ status = pt_image_read(&ifix->copy, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(isid, 2);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_image image;
+ int status;
+
+ status = pt_image_add_cached(NULL, &iscache, 0, NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ status = pt_image_add_cached(&image, NULL, 0, NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid, risid;
+
+ isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ risid = -1;
+ status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(risid, isid);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_null_asid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid, risid;
+
+ isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid, NULL);
+ ptu_int_eq(status, 0);
+
+ risid = -1;
+ status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(risid, isid);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_twice(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid, risid;
+
+ isid = ifix_cache_section(ifix, &ifix->section[0], 0x1000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, isid,
+ &ifix->asid[0]);
+ ptu_int_eq(status, 0);
+
+ risid = -1;
+ status = pt_image_read(&ifix->image, &risid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, 2);
+ ptu_int_eq(risid, isid);
+ ptu_uint_eq(buffer[0], 0x03);
+ ptu_uint_eq(buffer[1], 0x04);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_cached_bad_isid(struct image_fixture *ifix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ status = pt_image_add_cached(&ifix->image, &ifix->iscache, 1,
+ &ifix->asid[0]);
+ ptu_int_eq(status, -pte_bad_image);
+
+ isid = -1;
+ status = pt_image_read(&ifix->image, &isid, buffer, 2, &ifix->asid[0],
+ 0x1003ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_int_eq(isid, -1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(NULL, &msec, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_find(&ifix->image, NULL, &ifix->asid[0],
+ 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_find(&ifix->image, &msec, NULL, 0x1000ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[1], 0x2003ull);
+ ptu_int_eq(status, 11);
+ ptu_ptr_eq(msec.section, &ifix->section[1]);
+ ptu_uint_eq(msec.vaddr, 0x2000ull);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_asid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 1);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[1],
+ 0x1008ull, 2);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1009ull);
+ ptu_int_eq(status, 1);
+ ptu_ptr_eq(msec.section, &ifix->section[0]);
+ ptu_uint_eq(msec.vaddr, 0x1000ull);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[1], 0x1009ull);
+ ptu_int_eq(status, 2);
+ ptu_ptr_eq(msec.section, &ifix->section[0]);
+ ptu_uint_eq(msec.vaddr, 0x1008ull);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_asid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x2003ull);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_nomem(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_find(&ifix->image, &msec, &ifix->asid[1], 0x1010ull);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_null(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int status;
+
+ status = pt_image_validate(NULL, &msec, 0x1004ull, 10);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_image_validate(&ifix->image, NULL, 0x1004ull, 10);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_asid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.asid = ifix->asid[1];
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_vaddr(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.vaddr = 0x2000ull;
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_offset(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.offset = 0x8ull;
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_size(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ msec.size = 0x8ull;
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result validate_bad_isid(struct image_fixture *ifix)
+{
+ struct pt_mapped_section msec;
+ int isid, status;
+
+ isid = pt_image_find(&ifix->image, &msec, &ifix->asid[0], 0x1003ull);
+ ptu_int_ge(isid, 0);
+
+ status = pt_section_put(msec.section);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_validate(&ifix->image, &msec, 0x1004ull, isid + 1);
+ ptu_int_eq(status, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ifix_init(struct image_fixture *ifix)
+{
+ int index;
+
+ pt_image_init(&ifix->image, NULL);
+ pt_image_init(&ifix->copy, NULL);
+
+ memset(ifix->status, 0, sizeof(ifix->status));
+ memset(ifix->mapping, 0, sizeof(ifix->mapping));
+ memset(ifix->section, 0, sizeof(ifix->section));
+ memset(&ifix->iscache, 0, sizeof(ifix->iscache));
+
+ ifix->nsecs = 0;
+
+ index = ifix_add_section(ifix, "file-0");
+ ptu_int_eq(index, 0);
+
+ index = ifix_add_section(ifix, "file-1");
+ ptu_int_eq(index, 1);
+
+ index = ifix_add_section(ifix, "file-2");
+ ptu_int_eq(index, 2);
+
+ pt_asid_init(&ifix->asid[0]);
+ ifix->asid[0].cr3 = 0xa000;
+
+ pt_asid_init(&ifix->asid[1]);
+ ifix->asid[1].cr3 = 0xb000;
+
+ pt_asid_init(&ifix->asid[2]);
+ ifix->asid[2].cr3 = 0xc000;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result rfix_init(struct image_fixture *ifix)
+{
+ int status;
+
+ ptu_check(ifix_init, ifix);
+
+ status = pt_image_add(&ifix->image, &ifix->section[0], &ifix->asid[0],
+ 0x1000ull, 10);
+ ptu_int_eq(status, 0);
+
+ status = pt_image_add(&ifix->image, &ifix->section[1], &ifix->asid[1],
+ 0x2000ull, 11);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result dfix_fini(struct image_fixture *ifix)
+{
+ pt_image_fini(&ifix->image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ifix_fini(struct image_fixture *ifix)
+{
+ int sec;
+
+ ptu_check(dfix_fini, ifix);
+
+ pt_image_fini(&ifix->copy);
+
+ for (sec = 0; sec < ifix_nsecs; ++sec) {
+ ptu_int_eq(ifix->section[sec].ucount, 0);
+ ptu_int_eq(ifix->section[sec].mcount, 0);
+ ptu_int_le(ifix->status[sec].deleted, 1);
+ ptu_int_eq(ifix->status[sec].bad_put, 0);
+ }
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct image_fixture dfix, ifix, rfix;
+ struct ptunit_suite suite;
+
+ /* Dfix provides image destruction. */
+ dfix.init = NULL;
+ dfix.fini = dfix_fini;
+
+ /* Ifix provides an empty image. */
+ ifix.init = ifix_init;
+ ifix.fini = ifix_fini;
+
+ /* Rfix provides an image with two sections added. */
+ rfix.init = rfix_init;
+ rfix.fini = ifix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run_f(suite, init_name, dfix);
+ ptu_run(suite, init_null);
+
+ ptu_run(suite, fini);
+ ptu_run(suite, fini_empty);
+ ptu_run(suite, fini_null);
+
+ ptu_run_f(suite, name, dfix);
+ ptu_run(suite, name_none);
+ ptu_run(suite, name_null);
+
+ ptu_run_f(suite, read_empty, ifix);
+ ptu_run_f(suite, overlap_front, ifix);
+ ptu_run_f(suite, overlap_back, ifix);
+ ptu_run_f(suite, overlap_multiple, ifix);
+ ptu_run_f(suite, overlap_mid, ifix);
+ ptu_run_f(suite, contained, ifix);
+ ptu_run_f(suite, contained_multiple, ifix);
+ ptu_run_f(suite, contained_back, ifix);
+ ptu_run_f(suite, same, ifix);
+ ptu_run_f(suite, same_different_isid, ifix);
+ ptu_run_f(suite, same_different_offset, ifix);
+ ptu_run_f(suite, adjacent, ifix);
+
+ ptu_run_f(suite, read_null, rfix);
+ ptu_run_f(suite, read, rfix);
+ ptu_run_f(suite, read_asid, ifix);
+ ptu_run_f(suite, read_bad_asid, rfix);
+ ptu_run_f(suite, read_null_asid, rfix);
+ ptu_run_f(suite, read_callback, rfix);
+ ptu_run_f(suite, read_nomem, rfix);
+ ptu_run_f(suite, read_truncated, rfix);
+ ptu_run_f(suite, read_error, rfix);
+ ptu_run_f(suite, read_spurious_error, rfix);
+
+ ptu_run_f(suite, remove_section, rfix);
+ ptu_run_f(suite, remove_bad_vaddr, rfix);
+ ptu_run_f(suite, remove_bad_asid, rfix);
+ ptu_run_f(suite, remove_by_filename, rfix);
+ ptu_run_f(suite, remove_by_filename_bad_asid, rfix);
+ ptu_run_f(suite, remove_none_by_filename, rfix);
+ ptu_run_f(suite, remove_all_by_filename, ifix);
+ ptu_run_f(suite, remove_by_asid, rfix);
+
+ ptu_run_f(suite, copy_empty, ifix);
+ ptu_run_f(suite, copy, rfix);
+ ptu_run_f(suite, copy_self, rfix);
+ ptu_run_f(suite, copy_shrink, rfix);
+ ptu_run_f(suite, copy_split, ifix);
+ ptu_run_f(suite, copy_merge, ifix);
+ ptu_run_f(suite, copy_overlap, ifix);
+ ptu_run_f(suite, copy_replace, ifix);
+
+ ptu_run(suite, add_cached_null);
+ ptu_run_f(suite, add_cached, ifix);
+ ptu_run_f(suite, add_cached_null_asid, ifix);
+ ptu_run_f(suite, add_cached_twice, ifix);
+ ptu_run_f(suite, add_cached_bad_isid, ifix);
+
+ ptu_run_f(suite, find_null, rfix);
+ ptu_run_f(suite, find, rfix);
+ ptu_run_f(suite, find_asid, ifix);
+ ptu_run_f(suite, find_bad_asid, rfix);
+ ptu_run_f(suite, find_nomem, rfix);
+
+ ptu_run_f(suite, validate_null, rfix);
+ ptu_run_f(suite, validate, rfix);
+ ptu_run_f(suite, validate_bad_asid, rfix);
+ ptu_run_f(suite, validate_bad_vaddr, rfix);
+ ptu_run_f(suite, validate_bad_offset, rfix);
+ ptu_run_f(suite, validate_bad_size, rfix);
+ ptu_run_f(suite, validate_bad_isid, rfix);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-image_section_cache.c b/contrib/processor-trace/libipt/test/src/ptunit-image_section_cache.c
new file mode 100644
index 0000000000000..1b460b00d9961
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-image_section_cache.c
@@ -0,0 +1,2027 @@
+/*
+ * Copyright (c) 2016-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_image_section_cache.h"
+
+#include "ptunit_threads.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+
+struct pt_section {
+ /* The filename. We only support string literals for testing. */
+ const char *filename;
+
+ /* The file offset and size. */
+ uint64_t offset;
+ uint64_t size;
+
+ /* The bcache size. */
+ uint64_t bcsize;
+
+ /* The iscache back link. */
+ struct pt_image_section_cache *iscache;
+
+ /* The file content. */
+ uint8_t content[0x10];
+
+ /* The use count. */
+ int ucount;
+
+ /* The attach count. */
+ int acount;
+
+ /* The map count. */
+ int mcount;
+
+#if defined(FEATURE_THREADS)
+ /* A lock protecting this section. */
+ mtx_t lock;
+ /* A lock protecting the iscache and acount fields. */
+ mtx_t alock;
+#endif /* defined(FEATURE_THREADS) */
+};
+
+extern struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
+ uint64_t size);
+
+extern int pt_section_get(struct pt_section *section);
+extern int pt_section_put(struct pt_section *section);
+extern int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+extern int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache);
+
+extern int pt_section_map(struct pt_section *section);
+extern int pt_section_map_share(struct pt_section *section);
+extern int pt_section_unmap(struct pt_section *section);
+extern int pt_section_request_bcache(struct pt_section *section);
+
+extern const char *pt_section_filename(const struct pt_section *section);
+extern uint64_t pt_section_offset(const struct pt_section *section);
+extern uint64_t pt_section_size(const struct pt_section *section);
+extern int pt_section_memsize(struct pt_section *section, uint64_t *size);
+
+extern int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset);
+
+
+struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
+ uint64_t size)
+{
+ struct pt_section *section;
+
+ section = malloc(sizeof(*section));
+ if (section) {
+ uint8_t idx;
+
+ memset(section, 0, sizeof(*section));
+ section->filename = filename;
+ section->offset = offset;
+ section->size = size;
+ section->ucount = 1;
+
+ for (idx = 0; idx < sizeof(section->content); ++idx)
+ section->content[idx] = idx;
+
+#if defined(FEATURE_THREADS)
+		{
+			int errcode;
+
+			errcode = mtx_init(&section->lock, mtx_plain);
+			if (errcode != thrd_success) {
+				/* Return early; falling through would
+				 * initialize alock on the freed section.
+				 */
+				free(section);
+				return NULL;
+			}
+
+			errcode = mtx_init(&section->alock, mtx_plain);
+			if (errcode != thrd_success) {
+				mtx_destroy(&section->lock);
+				free(section);
+				return NULL;
+			}
+		}
+#endif /* defined(FEATURE_THREADS) */
+ }
+
+ return section;
+}
+
+static int pt_section_lock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_unlock(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->lock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_lock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_lock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+static int pt_section_unlock_attach(struct pt_section *section)
+{
+ if (!section)
+ return -pte_internal;
+
+#if defined(FEATURE_THREADS)
+ {
+ int errcode;
+
+ errcode = mtx_unlock(&section->alock);
+ if (errcode != thrd_success)
+ return -pte_bad_lock;
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ return 0;
+}
+
+int pt_section_get(struct pt_section *section)
+{
+ int errcode, ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = ++section->ucount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (!ucount)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ int errcode, ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = --section->ucount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (!ucount) {
+#if defined(FEATURE_THREADS)
+ mtx_destroy(&section->alock);
+ mtx_destroy(&section->lock);
+#endif /* defined(FEATURE_THREADS) */
+ free(section);
+ }
+
+ return 0;
+}
+
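+/* Attach @section to @iscache.
+ *
+ * The attach count must never exceed the use count.  The iscache back
+ * link is set on the first attach and must match on later attaches.
+ */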
+int pt_section_attach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ int errcode, ucount, acount;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ ucount = section->ucount;
+ acount = section->acount;
+ if (!acount) {
+ if (section->iscache || !ucount)
+ goto out_unlock;
+
+ section->iscache = iscache;
+ section->acount = 1;
+
+ return pt_section_unlock_attach(section);
+ }
+
+ acount += 1;
+ if (!acount) {
+ (void) pt_section_unlock_attach(section);
+ return -pte_overflow;
+ }
+
+ if (ucount < acount)
+ goto out_unlock;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ section->acount = acount;
+
+ return pt_section_unlock_attach(section);
+
+ out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+int pt_section_detach(struct pt_section *section,
+ struct pt_image_section_cache *iscache)
+{
+ int errcode, ucount, acount;
+
+ if (!section || !iscache)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (section->iscache != iscache)
+ goto out_unlock;
+
+ acount = section->acount;
+ if (!acount)
+ goto out_unlock;
+
+ acount -= 1;
+ ucount = section->ucount;
+ if (ucount < acount)
+ goto out_unlock;
+
+ section->acount = acount;
+ if (!acount)
+ section->iscache = NULL;
+
+ return pt_section_unlock_attach(section);
+
+ out_unlock:
+ (void) pt_section_unlock_attach(section);
+ return -pte_internal;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_map_share(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = 0;
+ iscache = section->iscache;
+ if (iscache)
+ status = pt_iscache_notify_map(iscache, section);
+
+ errcode = pt_section_unlock_attach(section);
+
+ return (status < 0) ? status : errcode;
+}
+
+int pt_section_map_share(struct pt_section *section)
+{
+ int errcode, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = ++section->mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (mcount <= 0)
+ return -pte_internal;
+
+ return 0;
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ int errcode, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ section->bcsize = 0ull;
+ mcount = --section->mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ if (mcount < 0)
+ return -pte_internal;
+
+ return 0;
+}
+
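+/* Simulate block cache allocation by adding a block cache three times
+ * the section's size and notifying the attached iscache of the new
+ * memory size.
+ */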
+int pt_section_request_bcache(struct pt_section *section)
+{
+ struct pt_image_section_cache *iscache;
+ uint64_t memsize;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock_attach(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ if (section->bcsize)
+ goto out_lock;
+
+ section->bcsize = section->size * 3;
+ memsize = section->size + section->bcsize;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ goto out_alock;
+
+ iscache = section->iscache;
+ if (iscache) {
+ errcode = pt_iscache_notify_resize(iscache, section, memsize);
+ if (errcode < 0)
+ goto out_alock;
+ }
+
+ return pt_section_unlock_attach(section);
+
+out_lock:
+ (void) pt_section_unlock(section);
+
+out_alock:
+ (void) pt_section_unlock_attach(section);
+ return errcode;
+}
+
+const char *pt_section_filename(const struct pt_section *section)
+{
+ if (!section)
+ return NULL;
+
+ return section->filename;
+}
+
+uint64_t pt_section_offset(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->offset;
+}
+
+uint64_t pt_section_size(const struct pt_section *section)
+{
+ if (!section)
+ return 0ull;
+
+ return section->size;
+}
+
+int pt_section_memsize(struct pt_section *section, uint64_t *size)
+{
+ if (!section || !size)
+ return -pte_internal;
+
+ *size = section->mcount ? section->size + section->bcsize : 0ull;
+
+ return 0;
+}
+
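+/* Read from the synthetic section content, where each byte holds its
+ * own index; reads are truncated at the end of the content buffer.
+ */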
+int pt_section_read(const struct pt_section *section, uint8_t *buffer,
+ uint16_t size, uint64_t offset)
+{
+ uint64_t begin, end, max;
+
+ if (!section || !buffer)
+ return -pte_internal;
+
+ begin = offset;
+ end = begin + size;
+ max = sizeof(section->content);
+
+ if (max <= begin)
+ return -pte_nomap;
+
+ if (max < end)
+ end = max;
+
+ if (end <= begin)
+ return -pte_invalid;
+
+ memcpy(buffer, &section->content[begin], (size_t) (end - begin));
+ return (int) (end - begin);
+}
+
+enum {
+ /* The number of test sections. */
+ num_sections = 8,
+
+#if defined(FEATURE_THREADS)
+
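+	/* The number of threads for the concurrency tests. */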
+ num_threads = 8,
+
+#endif /* defined(FEATURE_THREADS) */
+
+ num_iterations = 0x1000
+};
+
+struct iscache_fixture {
+ /* Threading support. */
+ struct ptunit_thrd_fixture thrd;
+
+ /* The image section cache under test. */
+ struct pt_image_section_cache iscache;
+
+ /* A bunch of test sections. */
+ struct pt_section *section[num_sections];
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct iscache_fixture *);
+ struct ptunit_result (*fini)(struct iscache_fixture *);
+};
+
+static struct ptunit_result dfix_init(struct iscache_fixture *cfix)
+{
+ int idx;
+
+ ptu_test(ptunit_thrd_init, &cfix->thrd);
+
+ memset(cfix->section, 0, sizeof(cfix->section));
+
+ for (idx = 0; idx < num_sections; ++idx) {
+ struct pt_section *section;
+
+ section = pt_mk_section("some-filename",
+ idx % 3 == 0 ? 0x1000 : 0x2000,
+ idx % 2 == 0 ? 0x1000 : 0x2000);
+ ptu_ptr(section);
+
+ cfix->section[idx] = section;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cfix_init(struct iscache_fixture *cfix)
+{
+ int errcode;
+
+ ptu_test(dfix_init, cfix);
+
+ errcode = pt_iscache_init(&cfix->iscache, NULL);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_init(struct iscache_fixture *cfix)
+{
+ int status, idx;
+
+ ptu_test(cfix_init, cfix);
+
+ cfix->iscache.limit = 0x7800;
+
+ for (idx = 0; idx < num_sections; ++idx) {
+ status = pt_iscache_add(&cfix->iscache, cfix->section[idx],
+ 0ull);
+ ptu_int_ge(status, 0);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cfix_fini(struct iscache_fixture *cfix)
+{
+ int idx, errcode;
+
+ ptu_test(ptunit_thrd_fini, &cfix->thrd);
+
+ for (idx = 0; idx < cfix->thrd.nthreads; ++idx)
+ ptu_int_eq(cfix->thrd.result[idx], 0);
+
+ pt_iscache_fini(&cfix->iscache);
+
+ for (idx = 0; idx < num_sections; ++idx) {
+ ptu_int_eq(cfix->section[idx]->ucount, 1);
+ ptu_int_eq(cfix->section[idx]->acount, 0);
+ ptu_int_eq(cfix->section[idx]->mcount, 0);
+ ptu_null(cfix->section[idx]->iscache);
+
+ errcode = pt_section_put(cfix->section[idx]);
+ ptu_int_eq(errcode, 0);
+ }
+
+ return ptu_passed();
+}
+
+
+static struct ptunit_result init_null(void)
+{
+ int errcode;
+
+ errcode = pt_iscache_init(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_null(void)
+{
+ pt_iscache_fini(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_null(void)
+{
+ const char *name;
+
+ name = pt_iscache_name(NULL);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section section;
+ int errcode;
+
+ errcode = pt_iscache_add(NULL, &section, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_iscache_add(&iscache, NULL, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null(void)
+{
+ int errcode;
+
+ errcode = pt_iscache_find(NULL, "filename", 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode;
+
+ errcode = pt_iscache_lookup(NULL, &section, &laddr, 0);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_iscache_lookup(&iscache, NULL, &laddr, 0);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_iscache_lookup(&iscache, &section, NULL, 0);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_null(void)
+{
+ int errcode;
+
+ errcode = pt_iscache_clear(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result free_null(void)
+{
+ pt_iscache_free(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_file_null(void)
+{
+ struct pt_image_section_cache iscache;
+ int errcode;
+
+ errcode = pt_iscache_add_file(NULL, "filename", 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_iscache_add_file(&iscache, NULL, 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(void)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t buffer;
+ int errcode;
+
+ errcode = pt_iscache_read(NULL, &buffer, sizeof(buffer), 1ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_iscache_read(&iscache, NULL, sizeof(buffer), 1ull, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_iscache_read(&iscache, &buffer, 0ull, 1, 0ull);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_fini(struct iscache_fixture *cfix)
+{
+ (void) cfix;
+
+ /* The actual init and fini calls are in cfix_init() and cfix_fini(). */
+ return ptu_passed();
+}
+
+static struct ptunit_result name(struct iscache_fixture *cfix)
+{
+ const char *name;
+
+ pt_iscache_init(&cfix->iscache, "iscache-name");
+
+ name = pt_iscache_name(&cfix->iscache);
+ ptu_str_eq(name, "iscache-name");
+
+ return ptu_passed();
+}
+
+static struct ptunit_result name_none(struct iscache_fixture *cfix)
+{
+ const char *name;
+
+ pt_iscache_init(&cfix->iscache, NULL);
+
+ name = pt_iscache_name(&cfix->iscache);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add(struct iscache_fixture *cfix)
+{
+ int isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ /* The cache attaches and gets a reference on success. */
+ ptu_int_eq(cfix->section[0]->ucount, 2);
+ ptu_int_eq(cfix->section[0]->acount, 1);
+
+ /* The added section must be implicitly put in pt_iscache_fini. */
+ return ptu_passed();
+}
+
+static struct ptunit_result add_no_name(struct iscache_fixture *cfix)
+{
+ struct pt_section section;
+ int errcode;
+
+ memset(&section, 0, sizeof(section));
+
+ errcode = pt_iscache_add(&cfix->iscache, &section, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_file(struct iscache_fixture *cfix)
+{
+ int isid;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, isid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_empty(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_filename(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, "bad-filename",
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_null_filename(struct iscache_fixture *cfix)
+{
+ int errcode;
+
+ errcode = pt_iscache_find(&cfix->iscache, NULL, 0ull, 0ull, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_offset(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename, 0ull,
+ section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_size(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, 0ull, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result find_bad_laddr(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 1ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(section, cfix->section[0]);
+ ptu_uint_eq(laddr, 0ull);
+
+ errcode = pt_section_put(section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lookup_bad_isid(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, 0);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, -isid);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid + 1);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_empty(struct iscache_fixture *cfix)
+{
+ int errcode;
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_find(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ int errcode, found, isid;
+
+ section = cfix->section[0];
+ ptu_ptr(section);
+
+ isid = pt_iscache_add(&cfix->iscache, section, 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(errcode, 0);
+
+ found = pt_iscache_find(&cfix->iscache, section->filename,
+ section->offset, section->size, 0ull);
+ ptu_int_eq(found, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result clear_lookup(struct iscache_fixture *cfix)
+{
+ struct pt_section *section;
+ uint64_t laddr;
+ int errcode, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid, 0);
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section, &laddr, isid);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_twice(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* The second add should be ignored. */
+ ptu_int_eq(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_same(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ cfix->section[1]->offset = cfix->section[0]->offset;
+ cfix->section[1]->size = cfix->section[0]->size;
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* The second add should be ignored. */
+ ptu_int_eq(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_twice_different_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[0], 1ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ /* We attach twice and take two references - one for each entry. */
+ ptu_int_eq(cfix->section[0]->ucount, 3);
+ ptu_int_eq(cfix->section[0]->acount, 2);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_same_different_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ cfix->section[1]->offset = cfix->section[0]->offset;
+ cfix->section[1]->size = cfix->section[0]->size;
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 1ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_different_same_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add(&cfix->iscache, cfix->section[0], 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add(&cfix->iscache, cfix->section[1], 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result add_file_same(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* The second add should be ignored. */
+ ptu_int_eq(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_file_same_different_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 1ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+add_file_different_same_laddr(struct iscache_fixture *cfix)
+{
+ int isid[2];
+
+ isid[0] = pt_iscache_add_file(&cfix->iscache, "name", 0ull, 1ull, 0ull);
+ ptu_int_gt(isid[0], 0);
+
+ isid[1] = pt_iscache_add_file(&cfix->iscache, "name", 1ull, 1ull, 0ull);
+ ptu_int_gt(isid[1], 0);
+
+ /* We must get different identifiers. */
+ ptu_int_ne(isid[1], isid[0]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 2ull, isid, 0xa008ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], 0x8);
+ ptu_uint_eq(buffer[1], 0x9);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_truncate(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, sizeof(buffer), isid,
+ 0xa00full);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(buffer[0], 0xf);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_vaddr(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 1ull, isid, 0xb000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_bad_isid(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc };
+ int status, isid;
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 1ull, isid + 1,
+ 0xa000ull);
+ ptu_int_eq(status, -pte_bad_image);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
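+/* Unmapping a section adds it to the LRU list as long as it fits
+ * within the cache limit.
+ */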
+static struct ptunit_result lru_map(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_read(struct iscache_fixture *cfix)
+{
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_iscache_read(&cfix->iscache, buffer, 2ull, isid, 0xa008ull);
+ ptu_int_eq(status, 2);
+
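+	/* The read maps and unmaps the section, adding it to the LRU list. */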
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_nodup(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = 2 * cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
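+	/* The section is cached only once, no matter how often it is
+	 * mapped and unmapped.
+	 */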
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_too_big(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size - 1;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
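+	/* The section exceeds the cache limit and is not cached. */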
+ ptu_null(cfix->iscache.lru);
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_add_front(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
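+	/* The most recently used section 1 is at the front of the list. */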
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[1]);
+ ptu_ptr(cfix->iscache.lru->next);
+ ptu_ptr_eq(cfix->iscache.lru->next->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next->next);
+ ptu_uint_eq(cfix->iscache.used,
+ cfix->section[0]->size + cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_move_front(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
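+	/* Re-mapping section 0 moves it back to the front of the list. */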
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_ptr(cfix->iscache.lru->next);
+ ptu_ptr_eq(cfix->iscache.lru->next->section, cfix->section[1]);
+ ptu_null(cfix->iscache.lru->next->next);
+ ptu_uint_eq(cfix->iscache.used,
+ cfix->section[0]->size + cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_map_evict(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size +
+ cfix->section[1]->size - 1;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
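+	/* Both sections together exceed the limit; the least recently used
+	 * section 0 is evicted.
+	 */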
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[1]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_bcache_evict(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = 4 * cfix->section[0]->size +
+ cfix->section[1]->size - 1;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_request_bcache(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
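+	/* The block cache grows section 0 to four times its size, which no
+	 * longer leaves room for section 1.
+	 */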
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[0]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, 4 * cfix->section[0]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_bcache_clear(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_request_bcache(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
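+	/* With its block cache, section 0 exceeds the limit even after
+	 * evicting section 1, leaving the cache empty.
+	 */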
+ ptu_null(cfix->iscache.lru);
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_limit_evict(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size + cfix->section[1]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[1], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_map(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[1]);
+ ptu_int_eq(status, 0);
+
+ status = pt_iscache_set_limit(&cfix->iscache,
+ cfix->section[0]->size +
+ cfix->section[1]->size - 1);
+ ptu_int_eq(status, 0);
+
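+	/* Lowering the limit below the combined size evicts the least
+	 * recently used section 0.
+	 */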
+ ptu_ptr(cfix->iscache.lru);
+ ptu_ptr_eq(cfix->iscache.lru->section, cfix->section[1]);
+ ptu_null(cfix->iscache.lru->next);
+ ptu_uint_eq(cfix->iscache.used, cfix->section[1]->size);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result lru_clear(struct iscache_fixture *cfix)
+{
+ int status, isid;
+
+ cfix->iscache.limit = cfix->section[0]->size;
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+ ptu_null(cfix->iscache.lru);
+
+ isid = pt_iscache_add(&cfix->iscache, cfix->section[0], 0xa000ull);
+ ptu_int_gt(isid, 0);
+
+ status = pt_section_map(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_unmap(cfix->section[0]);
+ ptu_int_eq(status, 0);
+
+ status = pt_iscache_clear(&cfix->iscache);
+ ptu_int_eq(status, 0);
+
+ ptu_null(cfix->iscache.lru);
+ ptu_uint_eq(cfix->iscache.used, 0ull);
+
+ return ptu_passed();
+}
+
+static int worker_add(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t laddr;
+ int sec;
+
+ laddr = 0x1000ull * (it % 23);
+
+ for (sec = 0; sec < num_sections; ++sec) {
+ struct pt_section *section;
+ uint64_t addr;
+ int isid, errcode;
+
+ isid = pt_iscache_add(&cfix->iscache,
+ cfix->section[sec], laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section,
+ &addr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (laddr != addr)
+ return -pte_noip;
+
+			/* We may not get the section we added, but the
+			 * section we get must have the same attributes.
+			 *
+			 * All sections use the same filename string literal,
+			 * so we only compare offset and size.
+			 */
+ if (section->offset != cfix->section[sec]->offset)
+ return -pte_bad_image;
+
+ if (section->size != cfix->section[sec]->size)
+ return -pte_bad_image;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_add_file(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t offset, size, laddr;
+ int sec;
+
+ offset = it % 7 == 0 ? 0x1000 : 0x2000;
+ size = it % 5 == 0 ? 0x1000 : 0x2000;
+ laddr = it % 3 == 0 ? 0x1000 : 0x2000;
+
+ for (sec = 0; sec < num_sections; ++sec) {
+ struct pt_section *section;
+ uint64_t addr;
+ int isid, errcode;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name",
+ offset, size, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section,
+ &addr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (laddr != addr)
+ return -pte_noip;
+
+ if (section->offset != offset)
+ return -pte_bad_image;
+
+ if (section->size != size)
+ return -pte_bad_image;
+
+ errcode = pt_section_put(section);
+ if (errcode < 0)
+ return errcode;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_map(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it, sec, status;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ for (sec = 0; sec < num_sections; ++sec) {
+
+ status = pt_section_map(cfix->section[sec]);
+ if (status < 0)
+ return status;
+
+ status = pt_section_unmap(cfix->section[sec]);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_map_limit(void *arg)
+{
+ struct iscache_fixture *cfix;
+ uint64_t limits[] = { 0x8000, 0x3000, 0x12000, 0x0 }, limit;
+ int it, sec, errcode, lim;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ lim = 0;
+ for (it = 0; it < num_iterations; ++it) {
+ for (sec = 0; sec < num_sections; ++sec) {
+
+ errcode = pt_section_map(cfix->section[sec]);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_unmap(cfix->section[sec]);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ if (it % 23 != 0)
+ continue;
+
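+		/* Every 23rd iteration, switch to the next limit in the
+		 * rotation.
+		 */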
+ limit = limits[lim++];
+ lim %= sizeof(limits) / sizeof(*limits);
+
+ errcode = pt_iscache_set_limit(&cfix->iscache, limit);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_map_bcache(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it, sec, status;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ for (sec = 0; sec < num_sections; ++sec) {
+ struct pt_section *section;
+
+ section = cfix->section[sec];
+
+ status = pt_section_map(section);
+ if (status < 0)
+ return status;
+
+ if (it % 13 == 0) {
+ status = pt_section_request_bcache(section);
+ if (status < 0) {
+ (void) pt_section_unmap(section);
+ return status;
+ }
+ }
+
+ status = pt_section_unmap(section);
+ if (status < 0)
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int worker_add_map(void *arg)
+{
+ struct iscache_fixture *cfix;
+ struct pt_section *section;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ section = cfix->section[0];
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t laddr;
+ int isid, errcode;
+
+ laddr = (uint64_t) it << 3;
+
+ isid = pt_iscache_add(&cfix->iscache, section, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_add_clear(void *arg)
+{
+ struct iscache_fixture *cfix;
+ struct pt_section *section;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ section = cfix->section[0];
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t laddr;
+ int isid, errcode;
+
+ laddr = (uint64_t) it << 3;
+
+ isid = pt_iscache_add(&cfix->iscache, section, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_add_file_map(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ struct pt_section *section;
+ uint64_t offset, size, laddr, addr;
+ int isid, errcode;
+
+ offset = it % 7 < 4 ? 0x1000 : 0x2000;
+ size = it % 5 < 3 ? 0x1000 : 0x2000;
+ laddr = it % 3 < 2 ? 0x1000 : 0x2000;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name",
+ offset, size, laddr);
+ if (isid < 0)
+ return isid;
+
+ errcode = pt_iscache_lookup(&cfix->iscache, &section,
+ &addr, isid);
+ if (errcode < 0)
+ return errcode;
+
+ if (addr != laddr)
+ return -pte_internal;
+
+ errcode = pt_section_map(section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_unmap(section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static int worker_add_file_clear(void *arg)
+{
+ struct iscache_fixture *cfix;
+ int it;
+
+ cfix = arg;
+ if (!cfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_iterations; ++it) {
+ uint64_t offset, size, laddr;
+ int isid, errcode;
+
+ offset = it % 7 < 4 ? 0x1000 : 0x2000;
+ size = it % 5 < 3 ? 0x1000 : 0x2000;
+ laddr = it % 3 < 2 ? 0x1000 : 0x2000;
+
+ isid = pt_iscache_add_file(&cfix->iscache, "name",
+ offset, size, laddr);
+ if (isid < 0)
+ return isid;
+
+ if (it % 11 < 9)
+ continue;
+
+ errcode = pt_iscache_clear(&cfix->iscache);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+}
+
+static struct ptunit_result stress(struct iscache_fixture *cfix,
+ int (*worker)(void *))
+{
+ int errcode;
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd;
+
+ for (thrd = 0; thrd < num_threads; ++thrd)
+ ptu_test(ptunit_thrd_create, &cfix->thrd, worker, cfix);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
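+	/* The main thread runs the worker, too; with FEATURE_THREADS it
+	 * races against the worker threads created above.
+	 */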
+ errcode = worker(cfix);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct iscache_fixture cfix, dfix, sfix;
+ struct ptunit_suite suite;
+
+ cfix.init = cfix_init;
+ cfix.fini = cfix_fini;
+
+ dfix.init = dfix_init;
+ dfix.fini = cfix_fini;
+
+ sfix.init = sfix_init;
+ sfix.fini = cfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init_null);
+ ptu_run(suite, fini_null);
+ ptu_run(suite, name_null);
+ ptu_run(suite, add_null);
+ ptu_run(suite, find_null);
+ ptu_run(suite, lookup_null);
+ ptu_run(suite, clear_null);
+ ptu_run(suite, free_null);
+ ptu_run(suite, add_file_null);
+ ptu_run(suite, read_null);
+
+ ptu_run_f(suite, name, dfix);
+ ptu_run_f(suite, name_none, dfix);
+
+ ptu_run_f(suite, init_fini, cfix);
+ ptu_run_f(suite, add, cfix);
+ ptu_run_f(suite, add_no_name, cfix);
+ ptu_run_f(suite, add_file, cfix);
+
+ ptu_run_f(suite, find, cfix);
+ ptu_run_f(suite, find_empty, cfix);
+ ptu_run_f(suite, find_bad_filename, cfix);
+ ptu_run_f(suite, find_null_filename, cfix);
+ ptu_run_f(suite, find_bad_offset, cfix);
+ ptu_run_f(suite, find_bad_size, cfix);
+ ptu_run_f(suite, find_bad_laddr, cfix);
+
+ ptu_run_f(suite, lookup, cfix);
+ ptu_run_f(suite, lookup_bad_isid, cfix);
+
+ ptu_run_f(suite, clear_empty, cfix);
+ ptu_run_f(suite, clear_find, cfix);
+ ptu_run_f(suite, clear_lookup, cfix);
+
+ ptu_run_f(suite, add_twice, cfix);
+ ptu_run_f(suite, add_same, cfix);
+ ptu_run_f(suite, add_twice_different_laddr, cfix);
+ ptu_run_f(suite, add_same_different_laddr, cfix);
+ ptu_run_f(suite, add_different_same_laddr, cfix);
+
+ ptu_run_f(suite, add_file_same, cfix);
+ ptu_run_f(suite, add_file_same_different_laddr, cfix);
+ ptu_run_f(suite, add_file_different_same_laddr, cfix);
+
+ ptu_run_f(suite, read, cfix);
+ ptu_run_f(suite, read_truncate, cfix);
+ ptu_run_f(suite, read_bad_vaddr, cfix);
+ ptu_run_f(suite, read_bad_isid, cfix);
+
+ ptu_run_f(suite, lru_map, cfix);
+ ptu_run_f(suite, lru_read, cfix);
+ ptu_run_f(suite, lru_map_nodup, cfix);
+ ptu_run_f(suite, lru_map_too_big, cfix);
+ ptu_run_f(suite, lru_map_add_front, cfix);
+ ptu_run_f(suite, lru_map_move_front, cfix);
+ ptu_run_f(suite, lru_map_evict, cfix);
+ ptu_run_f(suite, lru_limit_evict, cfix);
+ ptu_run_f(suite, lru_bcache_evict, cfix);
+ ptu_run_f(suite, lru_bcache_clear, cfix);
+ ptu_run_f(suite, lru_clear, cfix);
+
+ ptu_run_fp(suite, stress, cfix, worker_add);
+ ptu_run_fp(suite, stress, cfix, worker_add_file);
+ ptu_run_fp(suite, stress, sfix, worker_map);
+ ptu_run_fp(suite, stress, sfix, worker_map_limit);
+ ptu_run_fp(suite, stress, sfix, worker_map_bcache);
+ ptu_run_fp(suite, stress, cfix, worker_add_map);
+ ptu_run_fp(suite, stress, cfix, worker_add_clear);
+ ptu_run_fp(suite, stress, cfix, worker_add_file_map);
+ ptu_run_fp(suite, stress, cfix, worker_add_file_clear);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-last_ip.c b/contrib/processor-trace/libipt/test/src/ptunit-last_ip.c
new file mode 100644
index 0000000000000..bac47a33c3ed8
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-last_ip.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_last_ip.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+static struct ptunit_result init(void)
+{
+ struct pt_last_ip last_ip;
+
+ memset(&last_ip, 0xcd, sizeof(last_ip));
+
+ pt_last_ip_init(&last_ip);
+
+ ptu_uint_eq(last_ip.ip, 0ull);
+ ptu_uint_eq(last_ip.have_ip, 0);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_last_ip_init(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_initial(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ pt_last_ip_init(&last_ip);
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_null(void)
+{
+ int errcode;
+
+ errcode = pt_last_ip_query(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_noip(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.have_ip = 0;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result status_suppressed(void)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 1;
+
+ errcode = pt_last_ip_query(NULL, &last_ip);
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_initial(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip;
+ int errcode;
+
+ pt_last_ip_init(&last_ip);
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip, exp = 42ull;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, 0);
+	ptu_uint_eq(ip, exp);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_null(void)
+{
+ uint64_t ip = 13ull;
+ int errcode;
+
+ errcode = pt_last_ip_query(&ip, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+ ptu_uint_eq(ip, 13ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_noip(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip = 13ull;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = 0;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, -pte_noip);
+ ptu_uint_eq(ip, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_suppressed(void)
+{
+ struct pt_last_ip last_ip;
+ uint64_t ip = 13ull;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = 1;
+ last_ip.suppressed = 1;
+
+ errcode = pt_last_ip_query(&ip, &last_ip);
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+ ptu_uint_eq(ip, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_suppressed(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 42ull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_suppressed;
+ packet.ip = 13ull;
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 42ull);
+ ptu_uint_eq(last_ip.have_ip, have_ip);
+ ptu_uint_eq(last_ip.suppressed, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_upd16(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0xff0042ull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_update_16;
+ packet.ip = 0xccc013ull;
+
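+	/* An update-16 packet replaces bits 15:0 of the last IP:
+	 * 0xff0042 becomes 0xffc013.
+	 */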
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 0xffc013ull);
+ ptu_uint_eq(last_ip.have_ip, 1);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_upd32(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0xff00000420ull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_update_32;
+ packet.ip = 0xcc0000c013ull;
+
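+	/* An update-32 packet replaces bits 31:0 of the last IP:
+	 * 0xff00000420 becomes 0xff0000c013.
+	 */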
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 0xff0000c013ull);
+ ptu_uint_eq(last_ip.have_ip, 1);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_sext48(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0x7fffffffffffffffull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = pt_ipc_sext_48;
+ packet.ip = 0xff00000000ffull;
+
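+	/* A sext-48 packet replaces bits 47:0 and sign-extends bit 47 into
+	 * the upper bits.
+	 */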
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(last_ip.ip, 0xffffff00000000ffull);
+ ptu_uint_eq(last_ip.have_ip, 1);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_bad_packet(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ last_ip.ip = 0x7fffffffffffffffull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ packet.ipc = (enum pt_ip_compression) 0xff;
+ packet.ip = 0ull;
+
+ errcode = pt_last_ip_update_ip(&last_ip, &packet, NULL);
+ ptu_int_eq(errcode, -pte_bad_packet);
+ ptu_uint_eq(last_ip.ip, 0x7fffffffffffffffull);
+ ptu_uint_eq(last_ip.have_ip, have_ip);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_null_ip(void)
+{
+ struct pt_packet_ip packet;
+ int errcode;
+
+ errcode = pt_last_ip_update_ip(NULL, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_ip_null_packet(uint32_t have_ip)
+{
+ struct pt_last_ip last_ip;
+ int errcode;
+
+ last_ip.ip = 0x7fffffffffffffffull;
+ last_ip.have_ip = have_ip;
+ last_ip.suppressed = 0;
+
+ errcode = pt_last_ip_update_ip(&last_ip, NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+ ptu_uint_eq(last_ip.ip, 0x7fffffffffffffffull);
+ ptu_uint_eq(last_ip.have_ip, have_ip);
+ ptu_uint_eq(last_ip.suppressed, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run(suite, init_null);
+ ptu_run(suite, status_initial);
+ ptu_run(suite, status);
+ ptu_run(suite, status_null);
+ ptu_run(suite, status_noip);
+ ptu_run(suite, status_suppressed);
+ ptu_run(suite, query_initial);
+ ptu_run(suite, query);
+ ptu_run(suite, query_null);
+ ptu_run(suite, query_noip);
+ ptu_run(suite, query_suppressed);
+ ptu_run_p(suite, update_ip_suppressed, 0);
+ ptu_run_p(suite, update_ip_suppressed, 1);
+ ptu_run_p(suite, update_ip_upd16, 0);
+ ptu_run_p(suite, update_ip_upd16, 1);
+ ptu_run_p(suite, update_ip_upd32, 0);
+ ptu_run_p(suite, update_ip_upd32, 1);
+ ptu_run_p(suite, update_ip_sext48, 0);
+ ptu_run_p(suite, update_ip_sext48, 1);
+ ptu_run_p(suite, update_ip_bad_packet, 0);
+ ptu_run_p(suite, update_ip_bad_packet, 1);
+ ptu_run(suite, update_ip_null_ip);
+ ptu_run_p(suite, update_ip_null_packet, 0);
+ ptu_run_p(suite, update_ip_null_packet, 1);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-mapped_section.c b/contrib/processor-trace/libipt/test/src/ptunit-mapped_section.c
new file mode 100644
index 0000000000000..28f0a5bd6779c
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-mapped_section.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_mapped_section.h"
+
+#include "intel-pt.h"
+
+
+static struct ptunit_result begin(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t begin;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ begin = pt_msec_begin(&msec);
+ ptu_uint_eq(begin, 0x2000);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result end(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t end;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ end = pt_msec_end(&msec);
+ ptu_uint_eq(end, 0x3000);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result offset(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t offset;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ offset = pt_msec_offset(&msec);
+ ptu_uint_eq(offset, 0x100ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result size(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section sec;
+ uint64_t size;
+
+ pt_msec_init(&msec, &sec, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ size = pt_msec_size(&msec);
+ ptu_uint_eq(size, 0x1000ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result asid(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_asid asid;
+ const struct pt_asid *pasid;
+
+ pt_asid_init(&asid);
+ asid.cr3 = 0xa00000ull;
+ asid.vmcs = 0xb00000ull;
+
+ pt_msec_init(&msec, NULL, &asid, 0x2000ull, 0x100ull, 0x1000ull);
+
+ pasid = pt_msec_asid(&msec);
+ ptu_ptr(pasid);
+ ptu_uint_eq(pasid->cr3, asid.cr3);
+ ptu_uint_eq(pasid->vmcs, asid.vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result asid_null(void)
+{
+ struct pt_mapped_section msec;
+ const struct pt_asid *pasid;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ pasid = pt_msec_asid(&msec);
+ ptu_ptr(pasid);
+ ptu_uint_eq(pasid->cr3, pt_asid_no_cr3);
+ ptu_uint_eq(pasid->vmcs, pt_asid_no_vmcs);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map(void)
+{
+ struct pt_mapped_section msec;
+ uint64_t mapped;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
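+	/* Translate section offset 0x900 into a virtual address:
+	 * 0x2000 + (0x900 - 0x100) = 0x2800.
+	 */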
+ mapped = pt_msec_map(&msec, 0x900);
+ ptu_uint_eq(mapped, 0x2800);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unmap(void)
+{
+ struct pt_mapped_section msec;
+ uint64_t offset;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
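+	/* Translate virtual address 0x3000 back into a section offset:
+	 * (0x3000 - 0x2000) + 0x100 = 0x1100.
+	 */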
+ offset = pt_msec_unmap(&msec, 0x3000);
+ ptu_uint_eq(offset, 0x1100);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result section(void)
+{
+ static struct pt_section section;
+ struct pt_mapped_section msec;
+ struct pt_section *psection;
+
+ pt_msec_init(&msec, &section, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ psection = pt_msec_section(&msec);
+ ptu_ptr_eq(psection, &section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result section_null(void)
+{
+ struct pt_mapped_section msec;
+ struct pt_section *psection;
+
+ pt_msec_init(&msec, NULL, NULL, 0x2000ull, 0x100ull, 0x1000ull);
+
+ psection = pt_msec_section(&msec);
+ ptu_ptr_eq(psection, NULL);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, begin);
+ ptu_run(suite, end);
+ ptu_run(suite, offset);
+ ptu_run(suite, size);
+ ptu_run(suite, asid);
+ ptu_run(suite, asid_null);
+ ptu_run(suite, map);
+ ptu_run(suite, unmap);
+ ptu_run(suite, section);
+ ptu_run(suite, section_null);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-msec_cache.c b/contrib/processor-trace/libipt/test/src/ptunit-msec_cache.c
new file mode 100644
index 0000000000000..d3926c9a09676
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-msec_cache.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2017-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_msec_cache.h"
+
+#include "intel-pt.h"
+
+
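+/* A mock section providing the use and map counting the tests rely on. */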
+int pt_section_get(struct pt_section *section)
+{
+ uint16_t ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount + 1;
+ if (!ucount)
+ return -pte_overflow;
+
+ section->ucount = ucount;
+ return 0;
+}
+
+int pt_section_put(struct pt_section *section)
+{
+ uint16_t ucount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount)
+ return -pte_overflow;
+
+ section->ucount = ucount - 1;
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ uint16_t ucount, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount)
+ return -pte_internal;
+
+ mcount = section->mcount + 1;
+ if (!mcount)
+ return -pte_overflow;
+
+ section->mcount = mcount;
+ return 0;
+}
+
+int pt_section_unmap(struct pt_section *section)
+{
+ uint16_t ucount, mcount;
+
+ if (!section)
+ return -pte_internal;
+
+ ucount = section->ucount;
+ if (!ucount)
+ return -pte_internal;
+
+ mcount = section->mcount;
+ if (!mcount)
+ return -pte_overflow;
+
+ section->mcount = mcount - 1;
+ return 0;
+}
+
+/* A mock image. */
+struct pt_image {
+ /* The section stored in the image.
+ *
+ * This is either the fixture's section or NULL.
+ */
+ struct pt_section *section;
+};
+
+extern int pt_image_validate(struct pt_image *, struct pt_mapped_section *,
+ uint64_t, int);
+extern int pt_image_find(struct pt_image *, struct pt_mapped_section *,
+ const struct pt_asid *, uint64_t);
+
+int pt_image_validate(struct pt_image *image, struct pt_mapped_section *msec,
+ uint64_t vaddr, int isid)
+{
+ struct pt_section *section;
+
+ (void) vaddr;
+ (void) isid;
+
+ if (!image || !msec)
+ return -pte_internal;
+
+ section = image->section;
+ if (!section)
+ return -pte_nomap;
+
+ if (section != msec->section)
+ return -pte_nomap;
+
+ return 0;
+}
+
+int pt_image_find(struct pt_image *image, struct pt_mapped_section *msec,
+ const struct pt_asid *asid, uint64_t vaddr)
+{
+ struct pt_section *section;
+
+ (void) vaddr;
+
+ if (!image || !msec || !asid)
+ return -pte_internal;
+
+ section = image->section;
+ if (!section)
+ return -pte_nomap;
+
+ if (msec->section)
+ return -pte_internal;
+
+ msec->section = section;
+
+ return pt_section_get(section);
+}
+
+/* A test fixture providing a section and checking the use and map count. */
+struct test_fixture {
+ /* A test section. */
+ struct pt_section section;
+
+ /* A test cache. */
+ struct pt_msec_cache mcache;
+
+ /* A test image. */
+ struct pt_image image;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct test_fixture *);
+ struct ptunit_result (*fini)(struct test_fixture *);
+};
+
+static struct ptunit_result init_null(void)
+{
+ int status;
+
+ status = pt_msec_cache_init(NULL);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fini_null(void)
+{
+ pt_msec_cache_fini(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result invalidate_null(void)
+{
+ int status;
+
+ status = pt_msec_cache_invalidate(NULL);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(void)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_msec_cache mcache;
+ struct pt_image image;
+ int status;
+
+ status = pt_msec_cache_read(NULL, &msec, &image, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_read(&mcache, NULL, &image, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_read(&mcache, &msec, NULL, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fill_null(void)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_msec_cache mcache;
+ struct pt_image image;
+ struct pt_asid asid;
+ int status;
+
+ memset(&mcache, 0, sizeof(mcache));
+
+ status = pt_msec_cache_fill(NULL, &msec, &image, &asid, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_fill(&mcache, NULL, &image, &asid, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_fill(&mcache, &msec, NULL, &asid, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ status = pt_msec_cache_fill(&mcache, &msec, &image, NULL, 0ull);
+ ptu_int_eq(status, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result invalidate(struct test_fixture *tfix)
+{
+ struct pt_section *section;
+ int status;
+
+ status = pt_msec_cache_invalidate(&tfix->mcache);
+ ptu_int_eq(status, 0);
+
+ section = pt_msec_section(&tfix->mcache.msec);
+ ptu_null(section);
+
+ ptu_uint_eq(tfix->section.mcount, 0);
+ ptu_uint_eq(tfix->section.ucount, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomap(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ int status;
+
+ msec = NULL;
+
+ status = pt_msec_cache_read(&tfix->mcache, &msec, &tfix->image, 0ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_null(msec);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_section *section;
+ int status;
+
+ status = pt_msec_cache_read(&tfix->mcache, &msec, &tfix->image, 0ull);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr_eq(msec, &tfix->mcache.msec);
+
+ section = pt_msec_section(msec);
+ ptu_ptr_eq(section, &tfix->section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fill_nomap(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_asid asid;
+ struct pt_section *section;
+ int status;
+
+ msec = NULL;
+
+ status = pt_msec_cache_fill(&tfix->mcache, &msec, &tfix->image, &asid,
+ 0ull);
+ ptu_int_eq(status, -pte_nomap);
+
+ section = pt_msec_section(&tfix->mcache.msec);
+ ptu_null(section);
+ ptu_null(msec);
+
+ ptu_uint_eq(tfix->section.mcount, 0);
+ ptu_uint_eq(tfix->section.ucount, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result fill(struct test_fixture *tfix)
+{
+ const struct pt_mapped_section *msec;
+ struct pt_section *section;
+ struct pt_asid asid;
+ int status;
+
+ status = pt_msec_cache_fill(&tfix->mcache, &msec, &tfix->image, &asid,
+ 0ull);
+ ptu_int_eq(status, 0);
+
+ ptu_ptr_eq(msec, &tfix->mcache.msec);
+
+ section = pt_msec_section(msec);
+ ptu_ptr_eq(section, &tfix->section);
+
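+	/* The filled section is mapped and its use count is held by the
+	 * cache.
+	 */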
+ ptu_uint_eq(section->mcount, 1);
+ ptu_uint_eq(section->ucount, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_init(struct test_fixture *tfix)
+{
+ memset(&tfix->section, 0, sizeof(tfix->section));
+ memset(&tfix->mcache, 0, sizeof(tfix->mcache));
+ memset(&tfix->image, 0, sizeof(tfix->image));
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ifix_init(struct test_fixture *tfix)
+{
+ ptu_test(sfix_init, tfix);
+
+ tfix->image.section = &tfix->section;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cfix_init(struct test_fixture *tfix)
+{
+ ptu_test(sfix_init, tfix);
+
+ tfix->mcache.msec.section = &tfix->section;
+
+ tfix->section.ucount = 1;
+ tfix->section.mcount = 1;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cifix_init(struct test_fixture *tfix)
+{
+ ptu_test(cfix_init, tfix);
+
+ tfix->image.section = &tfix->section;
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+ struct test_fixture sfix, ifix, cfix, cifix;
+
+ sfix.init = sfix_init;
+ sfix.fini = NULL;
+
+ ifix.init = ifix_init;
+ ifix.fini = NULL;
+
+ cfix.init = cfix_init;
+ cfix.fini = NULL;
+
+ cifix.init = cifix_init;
+ cifix.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init_null);
+ ptu_run(suite, fini_null);
+ ptu_run(suite, invalidate_null);
+ ptu_run(suite, read_null);
+ ptu_run(suite, fill_null);
+
+ ptu_run_f(suite, invalidate, sfix);
+ ptu_run_f(suite, invalidate, cfix);
+
+ ptu_run_f(suite, read_nomap, sfix);
+ ptu_run_f(suite, read_nomap, ifix);
+ ptu_run_f(suite, read_nomap, cfix);
+ ptu_run_f(suite, read, cifix);
+
+ ptu_run_f(suite, fill_nomap, sfix);
+ ptu_run_f(suite, fill_nomap, cfix);
+ ptu_run_f(suite, fill, ifix);
+ ptu_run_f(suite, fill, cifix);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-packet.c b/contrib/processor-trace/libipt/test/src/ptunit-packet.c
new file mode 100644
index 0000000000000..c064081f9968c
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-packet.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_packet_decoder.h"
+#include "pt_query_decoder.h"
+#include "pt_encoder.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+/* A test fixture providing everything needed for packet en- and de-coding. */
+struct packet_fixture {
+ /* The trace buffer. */
+ uint8_t buffer[64];
+
+ /* Two packets for encoding[0] and decoding[1]. */
+ struct pt_packet packet[2];
+
+ /* The configuration. */
+ struct pt_config config;
+
+ /* The encoder. */
+ struct pt_encoder encoder;
+
+ /* The decoder. */
+ struct pt_packet_decoder decoder;
+
+ /* The return value for an unknown decode. */
+ int unknown;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct packet_fixture *);
+ struct ptunit_result (*fini)(struct packet_fixture *);
+};
+
+static int pfix_decode_unknown(struct pt_packet_unknown *packet,
+ const struct pt_config *config,
+ const uint8_t *pos, void *context)
+{
+ struct packet_fixture *pfix;
+
+ if (!packet || !config)
+ return -pte_internal;
+
+ pfix = (struct packet_fixture *) context;
+ if (!pfix)
+ return -pte_internal;
+
+ if (config->begin != pfix->buffer)
+ return -pte_internal;
+
+ if (config->end != pfix->buffer + sizeof(pfix->buffer))
+ return -pte_internal;
+
+ if (pos != pfix->buffer)
+ return -pte_internal;
+
+ packet->priv = pfix;
+
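+	/* Return the fixture-controlled result so tests can exercise both
+	 * success and error paths.
+	 */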
+ return pfix->unknown;
+}
+
+static struct ptunit_result pfix_init(struct packet_fixture *pfix)
+{
+ int errcode;
+
+ memset(pfix->buffer, 0, sizeof(pfix->buffer));
+ memset(pfix->packet, 0, sizeof(pfix->packet));
+ memset(&pfix->config, 0, sizeof(pfix->config));
+ pfix->config.size = sizeof(pfix->config);
+ pfix->config.begin = pfix->buffer;
+ pfix->config.end = pfix->buffer + sizeof(pfix->buffer);
+ pfix->config.decode.callback = pfix_decode_unknown;
+ pfix->config.decode.context = pfix;
+
+ pt_encoder_init(&pfix->encoder, &pfix->config);
+ pt_pkt_decoder_init(&pfix->decoder, &pfix->config);
+
+ errcode = pt_pkt_sync_set(&pfix->decoder, 0x0ull);
+ ptu_int_eq(errcode, 0);
+
+ pfix->unknown = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_fini(struct packet_fixture *pfix)
+{
+ pt_encoder_fini(&pfix->encoder);
+ pt_pkt_decoder_fini(&pfix->decoder);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptu_pkt_eq(const struct pt_packet *enc,
+ const struct pt_packet *dec)
+{
+ const uint8_t *renc, *rdec;
+ size_t byte;
+
+ ptu_ptr(enc);
+ ptu_ptr(dec);
+
+ renc = (const uint8_t *) enc;
+ rdec = (const uint8_t *) dec;
+
+ for (byte = 0; byte < sizeof(*enc); ++byte)
+ ptu_uint_eq(renc[byte], rdec[byte]);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pfix_test(struct packet_fixture *pfix)
+{
+ int size;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->packet[0].size = (uint8_t) size;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_gt(size, 0);
+
+ return ptu_pkt_eq(&pfix->packet[0], &pfix->packet[1]);
+}
+
+static struct ptunit_result no_payload(struct packet_fixture *pfix,
+ enum pt_packet_type type)
+{
+ pfix->packet[0].type = type;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unknown(struct packet_fixture *pfix, int exp)
+{
+ int size;
+
+ pfix->buffer[0] = pt_opc_bad;
+ pfix->unknown = exp;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, pfix->unknown);
+
+ if (size >= 0) {
+ ptu_int_eq(pfix->packet[1].type, ppt_unknown);
+ ptu_uint_eq(pfix->packet[1].size, (uint8_t) size);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.packet,
+ pfix->buffer);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unknown_ext(struct packet_fixture *pfix, int exp)
+{
+ int size;
+
+ pfix->buffer[0] = pt_opc_ext;
+ pfix->buffer[1] = pt_ext_bad;
+ pfix->unknown = exp;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, pfix->unknown);
+
+ if (size >= 0) {
+ ptu_int_eq(pfix->packet[1].type, ppt_unknown);
+ ptu_uint_eq(pfix->packet[1].size, (uint8_t) size);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.packet,
+ pfix->buffer);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unknown_ext2(struct packet_fixture *pfix, int exp)
+{
+ int size;
+
+ pfix->buffer[0] = pt_opc_ext;
+ pfix->buffer[1] = pt_ext_ext2;
+ pfix->buffer[2] = pt_ext2_bad;
+ pfix->unknown = exp;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, exp);
+
+ if (exp >= 0) {
+ ptu_int_eq(pfix->packet[1].type, ppt_unknown);
+ ptu_uint_eq(pfix->packet[1].size, (uint8_t) size);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.packet,
+ pfix->buffer);
+ ptu_ptr_eq(pfix->packet[1].payload.unknown.priv, pfix);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tnt_8(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tnt_8;
+ pfix->packet[0].payload.tnt.bit_size = 4;
+ pfix->packet[0].payload.tnt.payload = 0x5ull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tnt_64(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tnt_64;
+ pfix->packet[0].payload.tnt.bit_size = 23;
+ pfix->packet[0].payload.tnt.payload = 0xabcdeull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ip(struct packet_fixture *pfix,
+ enum pt_packet_type type,
+ enum pt_ip_compression ipc,
+ uint64_t ip)
+{
+ pfix->packet[0].type = type;
+ pfix->packet[0].payload.ip.ipc = ipc;
+ pfix->packet[0].payload.ip.ip = ip;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mode_exec(struct packet_fixture *pfix,
+ enum pt_exec_mode mode)
+{
+ struct pt_packet_mode_exec packet;
+
+ packet = pt_set_exec_mode(mode);
+
+ pfix->packet[0].type = ppt_mode;
+ pfix->packet[0].payload.mode.leaf = pt_mol_exec;
+ pfix->packet[0].payload.mode.bits.exec.csl = packet.csl;
+ pfix->packet[0].payload.mode.bits.exec.csd = packet.csd;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mode_tsx(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mode;
+ pfix->packet[0].payload.mode.leaf = pt_mol_tsx;
+ pfix->packet[0].payload.mode.bits.tsx.intx = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pip(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_pip;
+ pfix->packet[0].payload.pip.cr3 = 0x4200ull;
+ pfix->packet[0].payload.pip.nr = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tsc(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tsc;
+ pfix->packet[0].payload.tsc.tsc = 0x42ull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_cbr;
+ pfix->packet[0].payload.cbr.ratio = 0x23;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_tma;
+ pfix->packet[0].payload.tma.ctc = 0x42;
+ pfix->packet[0].payload.tma.fc = 0x123;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma_bad(struct packet_fixture *pfix)
+{
+ int errcode;
+
+ pfix->packet[0].type = ppt_tma;
+ pfix->packet[0].payload.tma.ctc = 0x42;
+ pfix->packet[0].payload.tma.fc = 0x200;
+
+ errcode = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_eq(errcode, -pte_bad_packet);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mtc(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mtc;
+ pfix->packet[0].payload.mtc.ctc = 0x23;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cyc(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_cyc;
+ pfix->packet[0].payload.cyc.value = 0x23;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result vmcs(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_vmcs;
+ pfix->packet[0].payload.vmcs.base = 0xabcdef000ull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mnt(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mnt;
+ pfix->packet[0].payload.mnt.payload = 0x1234567890abcdefull;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result exstop(struct packet_fixture *pfix, int ip)
+{
+ pfix->packet[0].type = ppt_exstop;
+ pfix->packet[0].payload.exstop.ip = ip ? 1 : 0;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mwait(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_mwait;
+ pfix->packet[0].payload.mwait.hints = 0xc;
+ pfix->packet[0].payload.mwait.ext = 0x1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pwre(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_pwre;
+ pfix->packet[0].payload.pwre.state = 0x0;
+ pfix->packet[0].payload.pwre.sub_state = 0x3;
+ pfix->packet[0].payload.pwre.hw = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pwrx(struct packet_fixture *pfix)
+{
+ pfix->packet[0].type = ppt_pwrx;
+ pfix->packet[0].payload.pwrx.last = 0x3;
+ pfix->packet[0].payload.pwrx.deepest = 0xa;
+ pfix->packet[0].payload.pwrx.store = 1;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptw(struct packet_fixture *pfix, uint8_t plc,
+ int ip)
+{
+ uint64_t pl, mask;
+ int size;
+
+ size = pt_ptw_size(plc);
+ ptu_int_gt(size, 0);
+
+ pl = 0x1234567890abcdefull;
+
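+	/* Truncate the payload to the number of bytes indicated by the
+	 * payload length code.
+	 */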
+ ptu_uint_le((size_t) size, sizeof(mask));
+ mask = ~0ull >> ((sizeof(mask) - (size_t) size) * 8);
+
+ pfix->packet[0].type = ppt_ptw;
+ pfix->packet[0].payload.ptw.payload = pl & mask;
+ pfix->packet[0].payload.ptw.plc = plc;
+ pfix->packet[0].payload.ptw.ip = ip ? 1 : 0;
+
+ ptu_test(pfix_test, pfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff(struct packet_fixture *pfix,
+ enum pt_packet_type type)
+{
+ int size;
+
+ pfix->packet[0].type = type;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
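+	/* Truncate the trace buffer one byte short of the packet's end. */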
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff_ip(struct packet_fixture *pfix,
+ enum pt_packet_type type)
+{
+ int size;
+
+ pfix->packet[0].type = type;
+ pfix->packet[0].payload.ip.ipc = pt_ipc_sext_48;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff_cyc(struct packet_fixture *pfix)
+{
+ int size;
+
+ pfix->packet[0].type = ppt_cyc;
+ pfix->packet[0].payload.cyc.value = 0xa8;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cutoff_mode(struct packet_fixture *pfix,
+ enum pt_mode_leaf leaf)
+{
+ int size;
+
+ pfix->packet[0].type = ppt_mode;
+ pfix->packet[0].payload.mode.leaf = leaf;
+
+ size = pt_enc_next(&pfix->encoder, &pfix->packet[0]);
+ ptu_int_gt(size, 0);
+
+ pfix->decoder.config.end = pfix->encoder.pos - 1;
+
+ size = pt_pkt_next(&pfix->decoder, &pfix->packet[1],
+ sizeof(pfix->packet[1]));
+ ptu_int_eq(size, -pte_eos);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct packet_fixture pfix;
+ struct ptunit_suite suite;
+
+ pfix.init = pfix_init;
+ pfix.fini = pfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_fp(suite, no_payload, pfix, ppt_pad);
+ ptu_run_fp(suite, no_payload, pfix, ppt_psb);
+ ptu_run_fp(suite, no_payload, pfix, ppt_ovf);
+ ptu_run_fp(suite, no_payload, pfix, ppt_psbend);
+ ptu_run_fp(suite, no_payload, pfix, ppt_stop);
+
+ ptu_run_fp(suite, unknown, pfix, 4);
+ ptu_run_fp(suite, unknown, pfix, -pte_nomem);
+ ptu_run_fp(suite, unknown_ext, pfix, 4);
+ ptu_run_fp(suite, unknown_ext, pfix, -pte_nomem);
+ ptu_run_fp(suite, unknown_ext2, pfix, 4);
+ ptu_run_fp(suite, unknown_ext2, pfix, -pte_nomem);
+
+ ptu_run_f(suite, tnt_8, pfix);
+ ptu_run_f(suite, tnt_64, pfix);
+
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pge, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_tip_pgd, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_suppressed, 0x0ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_16, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_32, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_update_48, 0x4200ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_sext_48, 0x42ull);
+ ptu_run_fp(suite, ip, pfix, ppt_fup, pt_ipc_full, 0x42ull);
+
+ ptu_run_fp(suite, mode_exec, pfix, ptem_16bit);
+ ptu_run_fp(suite, mode_exec, pfix, ptem_32bit);
+ ptu_run_fp(suite, mode_exec, pfix, ptem_64bit);
+ ptu_run_f(suite, mode_tsx, pfix);
+
+ ptu_run_f(suite, pip, pfix);
+ ptu_run_f(suite, tsc, pfix);
+ ptu_run_f(suite, cbr, pfix);
+ ptu_run_f(suite, tma, pfix);
+ ptu_run_f(suite, tma_bad, pfix);
+ ptu_run_f(suite, mtc, pfix);
+ ptu_run_f(suite, cyc, pfix);
+ ptu_run_f(suite, vmcs, pfix);
+ ptu_run_f(suite, mnt, pfix);
+ ptu_run_fp(suite, exstop, pfix, 0);
+ ptu_run_fp(suite, exstop, pfix, 1);
+ ptu_run_f(suite, mwait, pfix);
+ ptu_run_f(suite, pwre, pfix);
+ ptu_run_f(suite, pwrx, pfix);
+ ptu_run_fp(suite, ptw, pfix, 0, 1);
+ ptu_run_fp(suite, ptw, pfix, 1, 0);
+
+ ptu_run_fp(suite, cutoff, pfix, ppt_psb);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip_pge);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_tip_pgd);
+ ptu_run_fp(suite, cutoff_ip, pfix, ppt_fup);
+ ptu_run_fp(suite, cutoff, pfix, ppt_ovf);
+ ptu_run_fp(suite, cutoff, pfix, ppt_psbend);
+ ptu_run_fp(suite, cutoff, pfix, ppt_tnt_64);
+ ptu_run_fp(suite, cutoff, pfix, ppt_tsc);
+ ptu_run_fp(suite, cutoff, pfix, ppt_cbr);
+ ptu_run_fp(suite, cutoff, pfix, ppt_tma);
+ ptu_run_fp(suite, cutoff, pfix, ppt_mtc);
+ ptu_run_f(suite, cutoff_cyc, pfix);
+ ptu_run_fp(suite, cutoff_mode, pfix, pt_mol_exec);
+ ptu_run_fp(suite, cutoff_mode, pfix, pt_mol_tsx);
+ ptu_run_fp(suite, cutoff, pfix, ppt_vmcs);
+ ptu_run_fp(suite, cutoff, pfix, ppt_mnt);
+ ptu_run_fp(suite, cutoff, pfix, ppt_exstop);
+ ptu_run_fp(suite, cutoff, pfix, ppt_mwait);
+ ptu_run_fp(suite, cutoff, pfix, ppt_pwre);
+ ptu_run_fp(suite, cutoff, pfix, ppt_pwrx);
+ ptu_run_fp(suite, cutoff, pfix, ppt_ptw);
+
+ return ptunit_report(&suite);
+}
+
+
+/* Dummy decode functions to satisfy link dependencies.
+ *
+ * As a nice side-effect, we will know if we need to add more tests when
+ * adding new decoder functions.
+ */
+struct pt_query_decoder;
+
+int pt_qry_decode_unknown(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pad(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psb(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_8(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tnt_64(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pge(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tip_pgd(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_fup(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_pip(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ovf(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mode(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_psbend(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_tsc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_cbr(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_tma(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mtc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_cyc(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_stop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_vmcs(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_header_mnt(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_exstop(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_mwait(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwre(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_pwrx(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
+int pt_qry_decode_ptw(struct pt_query_decoder *d)
+{
+ (void) d;
+
+ return -pte_internal;
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-query.c b/contrib/processor-trace/libipt/test/src/ptunit-query.c
new file mode 100644
index 0000000000000..c47ad1223ed29
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-query.c
@@ -0,0 +1,2873 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_last_ip.h"
+#include "pt_decoder_function.h"
+#include "pt_query_decoder.h"
+#include "pt_encoder.h"
+#include "pt_opcodes.h"
+
+
+/* A query testing fixture. */
+
+struct ptu_decoder_fixture {
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct ptu_decoder_fixture *);
+ struct ptunit_result (*fini)(struct ptu_decoder_fixture *);
+
+ /* Encode an optional header for the test to read over. */
+ struct ptunit_result (*header)(struct ptu_decoder_fixture *);
+
+ /* The trace buffer. */
+ uint8_t buffer[1024];
+
+ /* The configuration under test. */
+ struct pt_config config;
+
+	/* An encoder and query decoder for the above configuration. */
+ struct pt_encoder encoder;
+ struct pt_query_decoder decoder;
+
+ /* For tracking last-ip in tests. */
+ struct pt_last_ip last_ip;
+};
+
+/* An invalid address. */
+static const uint64_t pt_dfix_bad_ip = (1ull << 62) - 1;
+
+/* A sign-extended address. */
+static const uint64_t pt_dfix_sext_ip = 0xffffff00ff00ff00ull;
+
+/* The highest possible address. */
+static const uint64_t pt_dfix_max_ip = (1ull << 47) - 1;
+
+/* The highest possible cr3 value. */
+static const uint64_t pt_dfix_max_cr3 = ((1ull << 47) - 1) & ~0x1f;
+
+/* Synchronize the decoder at the beginning of the trace stream, avoiding the
+ * initial PSB header.
+ */
+static struct ptunit_result ptu_sync_decoder(struct pt_query_decoder *decoder)
+{
+ ptu_ptr(decoder);
+ decoder->enabled = 1;
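+	/* Pretend tracing is enabled; tests that need it disabled reset the
+	 * flag after synchronizing.
+	 */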
+
+ (void) pt_df_fetch(&decoder->next, decoder->pos, &decoder->config);
+ return ptu_passed();
+}
+
+/* Truncate the last encoded packet by cutting off its final byte. */
+static struct ptunit_result cutoff(struct pt_query_decoder *decoder,
+ const struct pt_encoder *encoder)
+{
+ uint8_t *pos;
+
+ ptu_ptr(decoder);
+ ptu_ptr(encoder);
+
+ pos = encoder->pos;
+ ptu_ptr(pos);
+
+ pos -= 1;
+ ptu_ptr_le(decoder->config.begin, pos);
+
+ decoder->config.end = pos;
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_not_synced(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_nosync);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_not_synced(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_nosync);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_not_synced(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_event event;
+ int errcode;
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_nosync);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_backward(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[2]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
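+
+/* A minimal consumer sketch for backward synchronization (assuming the
+ * decoder has been allocated and configured; handle_segment() is a
+ * hypothetical callback, not part of libipt):
+ *
+ *	uint64_t ip;
+ *	int status;
+ *
+ *	for (;;) {
+ *		status = pt_qry_sync_backward(decoder, &ip);
+ *		if (status < 0)
+ *			break;
+ *
+ *		handle_segment(decoder, ip);
+ *	}
+ *
+ * The loop ends with -pte_eos once all PSBs have been visited, matching the
+ * expectations checked above.
+ */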
+
+static struct ptunit_result
+sync_backward_empty_end(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ *
+ * There's an empty PSB+ at the end. We skip it.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_backward_empty_mid(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ *
+ * There's an empty PSB+ in the middle. We skip it.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[2]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_backward_empty_begin(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t sync[3], offset, ip;
+ int errcode;
+
+ /* Check that we can use repeated pt_qry_sync_backward() to iterate over
+ * synchronization points in backwards order.
+ *
+ * There's an empty PSB+ at the beginning. We skip it.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[2]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ /* Synchronize repeatedly and check that we reach each PSB in the
+ * correct order.
+ */
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[2]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+decode_sync_backward(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t sync[2], offset, ip;
+ int errcode;
+
+ /* Check that we can use sync_backward to re-sync at the current trace
+ * segment as well as to find the previous trace segment.
+ */
+
+ errcode = pt_enc_get_offset(encoder, &sync[0]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_enc_get_offset(encoder, &sync[1]);
+ ptu_int_ge(errcode, 0);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_exec(encoder, ptem_64bit);
+ pt_encode_psbend(encoder);
+
+
+ errcode = pt_qry_sync_forward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_ge(errcode, 0);
+ ptu_int_eq(event.type, ptev_exec_mode);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_ge(errcode, 0);
+ ptu_int_eq(event.type, ptev_exec_mode);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[1]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_ge(errcode, 0);
+
+ errcode = pt_qry_get_sync_offset(decoder, &offset);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(offset, sync[0]);
+
+ errcode = pt_qry_sync_backward(decoder, &ip);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ errcode = pt_qry_indirect_branch(NULL, &addr);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_uint_eq(addr, ip);
+
+ errcode = pt_qry_indirect_branch(decoder, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_ptr_eq(decoder->pos, config->begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_empty(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ decoder->pos = config->end;
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ uint64_t addr = pt_dfix_bad_ip;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tip(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
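+	/* A suppressed IP still consumes the packet; it is reported via
+	 * pts_ip_suppressed and the caller's address is left untouched.
+	 */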
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ if (ipc == pt_ipc_suppressed) {
+ ptu_int_eq(errcode, pts_ip_suppressed | pts_eos);
+ ptu_uint_eq(addr, pt_dfix_bad_ip);
+ } else {
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_tnt(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ uint64_t addr = pt_dfix_bad_ip;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tnt_8(encoder, 0ull, 1);
+ pt_encode_tip(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ if (ipc == pt_ipc_suppressed) {
+ ptu_int_eq(errcode, pts_ip_suppressed);
+ ptu_uint_eq(addr, pt_dfix_bad_ip);
+ } else {
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result indir_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
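+/* Queries must not silently skip packets they do not handle.  Asking for an
+ * indirect branch while the trace continues with a different kind of flow
+ * packet fails with -pte_bad_query.
+ */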
+static struct ptunit_result
+indir_skip_tnt_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ int errcode;
+
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_tip_pge_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pos = encoder->pos;
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_fup_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+indir_skip_fup_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip = pt_dfix_bad_ip, addr = ip;
+ const uint8_t *pos;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_uint_eq(addr, ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ errcode = pt_qry_cond_branch(NULL, &taken);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_int_eq(taken, tnt);
+
+ errcode = pt_qry_cond_branch(decoder, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_ptr_eq(decoder->pos, config->begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_empty(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ decoder->pos = config->end;
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+
+ pt_encode_tnt_8(encoder, 0x02, 3);
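+	/* The payload 0x02 over three bits encodes the branch history 0, 1, 0,
+	 * consumed from the most significant of the three bits down.
+	 */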
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, 0);
+ ptu_int_eq(taken, 0);
+
+ taken = tnt;
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, 0);
+ ptu_int_eq(taken, 1);
+
+ taken = tnt;
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(taken, 0);
+
+ taken = tnt;
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_eos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cond_skip_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_tip_pge_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pos = encoder->pos;
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_fup_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+cond_skip_fup_tip_pgd_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, tnt = 0xbc, taken = tnt;
+ const uint8_t *pos;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pos = encoder->pos;
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tnt_8(encoder, 0, 1);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, -pte_bad_query);
+ ptu_ptr_eq(decoder->pos, pos);
+ ptu_int_eq(taken, tnt);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ struct pt_event event;
+ int errcode;
+
+ errcode = pt_qry_event(NULL, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_event(decoder, NULL, sizeof(event));
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_ptr_eq(decoder->pos, config->begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_bad_size(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_event event;
+ int errcode;
+
+ errcode = pt_qry_event(decoder, &event, 4);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_small_size(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ union {
+ struct pt_event event;
+ uint8_t buffer[41];
+ } variant;
+ int errcode;
+
+ memset(variant.buffer, 0xcd, sizeof(variant.buffer));
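+	/* The 0xcd fill is a canary: we pass only 40 bytes to pt_qry_event()
+	 * below and expect byte 40 to keep the fill pattern.
+	 */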
+
+ pt_encode_tip_pge(encoder, 0ull, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &variant.event, 40);
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(variant.event.type, ptev_enabled);
+ ptu_uint_eq(variant.buffer[40], 0xcd);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_big_size(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ union {
+ struct pt_event event;
+ uint8_t buffer[1024];
+ } variant;
+ int errcode;
+
+ memset(variant.buffer, 0xcd, sizeof(variant.buffer));
+
+ pt_encode_tip_pge(encoder, 0ull, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &variant.event, sizeof(variant.buffer));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(variant.event.type, ptev_enabled);
+ ptu_uint_eq(variant.buffer[sizeof(variant.event)], 0xcd);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_empty(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_config *config = &decoder->config;
+ struct pt_event event;
+ int errcode;
+
+ decoder->pos = config->end;
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_enabled(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tip_pge(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ if (ipc == pt_ipc_suppressed)
+ ptu_int_eq(errcode, -pte_bad_packet);
+ else {
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_enabled);
+ ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_enabled_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_disabled(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_tip_pgd(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.disabled.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_disabled);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_disabled_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_update_32);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, tip;
+ struct pt_event event;
+ int errcode;
+
+ fup.ipc = pt_ipc_sext_48;
+ fup.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config);
+
+ tip.ipc = ipc;
+ tip.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &tip, &dfix->config);
+
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_tip_pgd(encoder, tip.ip, tip.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.async_disabled.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_async_disabled);
+ ptu_uint_eq(event.variant.async_disabled.at, fup.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled_suppressed_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_suppressed);
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled_cutoff_fail_a(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t at = pt_dfix_sext_ip;
+ int errcode;
+
+ pt_encode_fup(encoder, at, pt_ipc_sext_48);
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_disabled_cutoff_fail_b(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_branch_suppressed_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_suppressed);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_ip_suppressed);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_async_branch(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, tip;
+ struct pt_event event;
+ int errcode;
+
+ fup.ipc = pt_ipc_sext_48;
+ fup.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config);
+
+ tip.ipc = ipc;
+ tip.ip = pt_dfix_sext_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &tip, &dfix->config);
+
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_tip(encoder, tip.ip, tip.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.async_branch.to, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_async_branch);
+ ptu_uint_eq(event.variant.async_branch.from, fup.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_branch_cutoff_fail_a(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pt_encode_tip_pgd(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_branch_cutoff_fail_b(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_paging(struct ptu_decoder_fixture *dfix,
+ uint8_t flags, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t cr3 = pt_dfix_max_cr3;
+ int errcode;
+
+ pt_encode_pip(encoder, cr3, flags);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_paging);
+ ptu_uint_eq(event.variant.paging.cr3, cr3);
+ ptu_uint_eq(event.variant.paging.non_root, (flags & pt_pl_pip_nr) != 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_paging_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_pip(encoder, 0, 0);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_paging(struct ptu_decoder_fixture *dfix, uint8_t flags,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t to = pt_dfix_sext_ip, from = to & ~0xffffull;
+ uint64_t cr3 = pt_dfix_max_cr3;
+ int errcode;
+
+ pt_encode_fup(encoder, from, pt_ipc_sext_48);
+ pt_encode_pip(encoder, cr3, flags);
+ pt_encode_tip(encoder, to, pt_ipc_update_16);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_int_eq(event.type, ptev_async_branch);
+ ptu_uint_eq(event.variant.async_branch.from, from);
+ ptu_uint_eq(event.variant.async_branch.to, to);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_async_paging);
+ ptu_uint_eq(event.variant.async_paging.cr3, cr3);
+ ptu_uint_eq(event.variant.async_paging.non_root,
+ (flags & pt_pl_pip_nr) != 0);
+ ptu_uint_eq(event.variant.async_paging.ip, to);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_paging_suppressed(struct ptu_decoder_fixture *dfix, uint8_t flags,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ uint64_t from = pt_dfix_sext_ip, cr3 = pt_dfix_max_cr3;
+ int errcode;
+
+ pt_encode_fup(encoder, from, pt_ipc_sext_48);
+ pt_encode_pip(encoder, cr3, flags);
+ pt_encode_tip(encoder, 0, pt_ipc_suppressed);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_ne(event.ip_suppressed, 0);
+ ptu_int_eq(event.type, ptev_async_branch);
+ ptu_uint_eq(event.variant.async_branch.from, from);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_ne(event.ip_suppressed, 0);
+ ptu_int_eq(event.type, ptev_async_paging);
+ ptu_uint_eq(event.variant.async_paging.cr3, cr3);
+ ptu_uint_eq(event.variant.async_paging.non_root,
+ (flags & pt_pl_pip_nr) != 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_async_paging_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_fup(encoder, 0, pt_ipc_sext_48);
+ pt_encode_pip(encoder, 0, 0);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_overflow_fup(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_ovf(encoder);
+ pt_encode_fup(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
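+	/* The FUP gives the IP at which tracing resumes after the overflow;
+	 * with a suppressed IP the decoder is left without one, hence
+	 * -pte_noip below.
+	 */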
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, -pte_noip);
+ break;
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_overflow);
+ ptu_uint_eq(event.variant.overflow.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ break;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_overflow_tip_pge(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ struct pt_packet_ip packet;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_ovf(encoder);
+ pt_encode_tip_pge(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_int_eq(event.type, ptev_overflow);
+ ptu_uint_ne(event.ip_suppressed, 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, -pte_bad_packet);
+ break;
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_enabled);
+ ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ break;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_overflow_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_ovf(encoder);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_stop(struct ptu_decoder_fixture *dfix,
+ uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_stop(encoder);
+
+	ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_stop);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ enum pt_exec_mode mode = ptem_16bit;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ uint64_t addr = 0ull;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_mode_exec(encoder, mode);
+ pt_encode_tip(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, 0);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.exec_mode.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_exec_mode);
+ ptu_int_eq(event.variant.exec_mode.mode, mode);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ if (ipc == pt_ipc_suppressed)
+ ptu_int_eq(errcode, pts_ip_suppressed | pts_eos);
+ else {
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_exec(encoder, ptem_32bit);
+ pt_encode_tip(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip_pge(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ enum pt_exec_mode mode = ptem_16bit;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ uint64_t addr = 0ull;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_mode_exec(encoder, mode);
+ pt_encode_tip_pge(encoder, packet.ip, packet.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+ decoder->enabled = 0;
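+	/* Start with tracing disabled so the TIP.PGE generates an enabled
+	 * event to which the preceding MODE.Exec binds.
+	 */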
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ if (ipc == pt_ipc_suppressed) {
+ ptu_int_eq(errcode, -pte_bad_packet);
+ ptu_uint_eq(addr, 0ull);
+ } else {
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_int_eq(event.type, ptev_enabled);
+ ptu_uint_eq(event.variant.enabled.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_exec_mode);
+ ptu_int_eq(event.variant.exec_mode.mode, mode);
+ ptu_uint_eq(event.variant.exec_mode.ip, dfix->last_ip.ip);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_tip_pge_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_exec(encoder, ptem_16bit);
+ pt_encode_tip_pge(encoder, 0, pt_ipc_sext_48);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_exec_mode_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_exec(encoder, ptem_64bit);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result event_tsx_fup(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc,
+ uint8_t flags, uint64_t tsc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, tip;
+ struct pt_event event;
+ uint64_t addr = 0;
+ int errcode;
+
+ fup.ipc = ipc;
+ fup.ip = pt_dfix_max_ip;
+ pt_last_ip_update_ip(&dfix->last_ip, &fup, &dfix->config);
+
+ tip.ipc = pt_ipc_sext_48;
+ tip.ip = pt_dfix_sext_ip;
+
+ pt_encode_mode_tsx(encoder, flags);
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_tip(encoder, tip.ip, tip.ipc);
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, 0);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.tsx.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_tsx);
+ ptu_int_eq(event.variant.tsx.speculative,
+ (flags & pt_mob_tsx_intx) != 0);
+ ptu_int_eq(event.variant.tsx.aborted,
+ (flags & pt_mob_tsx_abrt) != 0);
+
+ if (!tsc)
+ ptu_int_eq(event.has_tsc, 0);
+ else {
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, tsc);
+ }
+
+ errcode = pt_qry_indirect_branch(decoder, &addr);
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_eq(addr, tip.ip);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_tsx_fup_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_tsx(encoder, 0);
+ pt_encode_fup(encoder, 0, pt_ipc_update_16);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_tsx_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_mode_tsx(encoder, 0);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_skip_tip_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ const uint8_t *pos;
+ int errcode;
+
+ pos = encoder->pos;
+ pt_encode_tip(encoder, 0, pt_ipc_sext_48);
+ /* We omit the actual event - we don't get that far, anyway. */
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_bad_query);
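+	/* The decoder must not have advanced past the TIP packet it
+	 * refused to skip.
+	 */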
+ ptu_ptr_eq(decoder->pos, pos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_skip_tnt_8_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_tnt_8(encoder, 0, 1);
+ /* We omit the actual event - we don't get that far, anyway. */
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_bad_query);
+ /* The fail position depends on the fixture's header. */
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+event_skip_tnt_64_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tnt_64(encoder, 0, 1);
+ pt_encode_tnt_64(encoder, 0, 1);
+ /* We omit the actual event - we don't get that far, anyway. */
+
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, -pte_bad_query);
+ /* The fail position depends on the fixture's header. */
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_event(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip packet;
+ struct pt_event event;
+ uint64_t addr = 0ull;
+ int errcode;
+
+ packet.ipc = ipc;
+ packet.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &packet, &dfix->config);
+
+ pt_encode_psb(encoder);
+ pt_encode_mode_tsx(encoder, pt_mob_tsx_intx);
+ pt_encode_fup(encoder, packet.ip, packet.ipc);
+ pt_encode_psbend(encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, (pts_event_pending | pts_ip_suppressed));
+ break;
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_eq(addr, dfix->last_ip.ip);
+ break;
+ }
+
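+	/* Events taken from the PSB+ header are flagged as status updates. */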
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+ ptu_uint_ne(event.status_update, 0);
+ if (ipc == pt_ipc_suppressed)
+ ptu_uint_ne(event.ip_suppressed, 0);
+ else {
+ ptu_uint_eq(event.ip_suppressed, 0);
+ ptu_uint_eq(event.variant.tsx.ip, dfix->last_ip.ip);
+ }
+ ptu_int_eq(event.type, ptev_tsx);
+ ptu_int_eq(event.variant.tsx.speculative, 1);
+ ptu_int_eq(event.variant.tsx.aborted, 0);
+ ptu_int_eq(event.has_tsc, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_event_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t addr;
+ int errcode;
+
+ pt_encode_psb(encoder);
+ pt_encode_psbend(encoder);
+
+ ptu_check(cutoff, decoder, encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_event_incomplete_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t addr;
+ int errcode;
+
+ pt_encode_psb(encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_ovf_event(struct ptu_decoder_fixture *dfix,
+ enum pt_ip_compression ipc)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_packet_ip fup, ovf;
+ struct pt_event event;
+ uint64_t addr = 0;
+ int errcode;
+
+ fup.ipc = pt_ipc_sext_48;
+ fup.ip = pt_dfix_max_ip;
+
+ ovf.ipc = ipc;
+ ovf.ip = 0xccull;
+
+ pt_last_ip_init(&dfix->last_ip);
+ pt_last_ip_update_ip(&dfix->last_ip, &ovf, &dfix->config);
+
+ pt_encode_psb(encoder);
+ pt_encode_fup(encoder, fup.ip, fup.ipc);
+ pt_encode_mode_tsx(encoder, 0);
+ pt_encode_tsc(encoder, 0x1000);
+ pt_encode_ovf(encoder);
+ pt_encode_fup(encoder, ovf.ip, ovf.ipc);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_eq(addr, fup.ip);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_event_pending);
+ ptu_uint_ne(event.status_update, 0);
+ ptu_int_eq(event.type, ptev_tsx);
+ ptu_int_eq(event.variant.tsx.speculative, 0);
+ ptu_int_eq(event.variant.tsx.aborted, 0);
+ ptu_uint_eq(event.variant.tsx.ip, fup.ip);
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, 0x1000);
+
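+	/* The overflow event comes next; it reports the IP at which tracing
+	 * resumes, so a suppressed FUP IP yields -pte_noip.
+	 */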
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ switch (ipc) {
+ case pt_ipc_suppressed:
+ ptu_int_eq(errcode, -pte_noip);
+ return ptu_passed();
+
+ case pt_ipc_update_16:
+ case pt_ipc_update_32:
+ case pt_ipc_update_48:
+ case pt_ipc_sext_48:
+ case pt_ipc_full:
+ ptu_int_eq(errcode, pts_eos);
+ ptu_int_eq(event.type, ptev_overflow);
+ ptu_uint_eq(event.variant.overflow.ip, dfix->last_ip.ip);
+ ptu_int_eq(event.has_tsc, 1);
+ ptu_uint_eq(event.tsc, 0x1000);
+ break;
+ }
+
+ return ptu_passed();
+}
+
+static struct ptunit_result
+sync_ovf_event_cutoff_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t addr;
+ int errcode;
+
+ pt_encode_psb(encoder);
+ pt_encode_ovf(encoder);
+
+ ptu_check(cutoff, decoder, encoder);
+
+ errcode = pt_qry_sync_forward(decoder, &addr);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_null_fail(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_qry_time(NULL, NULL, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_time(decoder, NULL, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_time(NULL, &tsc, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time_initial(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_qry_time(decoder, &tsc, NULL, NULL);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result time(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint64_t tsc, exp;
+ int errcode;
+
+ exp = 0x11223344556677ull;
+
+ decoder->last_time.have_tsc = 1;
+ decoder->last_time.tsc = exp;
+
+ errcode = pt_qry_time(decoder, &tsc, NULL, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(tsc, exp);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr_null(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_qry_core_bus_ratio(NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_core_bus_ratio(decoder, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ errcode = pt_qry_core_bus_ratio(NULL, &cbr);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr_initial(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_qry_core_bus_ratio(decoder, &cbr);
+ ptu_int_eq(errcode, -pte_no_cbr);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ uint32_t cbr;
+ int errcode;
+
+ decoder->last_time.have_cbr = 1;
+ decoder->last_time.cbr = 42;
+
+ errcode = pt_qry_core_bus_ratio(decoder, &cbr);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(cbr, 42);
+
+ return ptu_passed();
+}
+
+/* Test that end-of-stream is indicated correctly when the stream ends with a
+ * partial non-query-relevant packet.
+ */
+static struct ptunit_result indir_cyc_cutoff(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ uint64_t ip;
+ int errcode;
+
+ pt_encode_tip(encoder, 0xa000ull, pt_ipc_full);
+ pt_encode_cyc(encoder, 0xfff);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_indirect_branch(decoder, &ip);
+ ptu_int_eq(errcode, pts_eos);
+
+ return ptu_passed();
+}
+
+/* Test that end-of-stream is indicated correctly when the stream ends with a
+ * partial non-query-relevant packet.
+ */
+static struct ptunit_result cond_cyc_cutoff(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ int errcode, taken;
+
+ pt_encode_tnt_8(encoder, 0, 1);
+ pt_encode_cyc(encoder, 0xfff);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_cond_branch(decoder, &taken);
+ ptu_int_eq(errcode, pts_eos);
+
+ return ptu_passed();
+}
+
+/* Test that end-of-stream is indicated correctly when the stream ends with a
+ * partial non-query-relevant packet.
+ */
+static struct ptunit_result event_cyc_cutoff(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+ struct pt_event event;
+ int errcode;
+
+ pt_encode_tip_pgd(encoder, 0ull, pt_ipc_full);
+ pt_encode_cyc(encoder, 0xffff);
+
+ ptu_check(cutoff, decoder, encoder);
+ ptu_check(ptu_sync_decoder, decoder);
+
+ errcode = pt_qry_event(decoder, &event, sizeof(event));
+ ptu_int_eq(errcode, pts_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptu_dfix_init(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_config *config = &dfix->config;
+ int errcode;
+
+ (void) memset(dfix->buffer, 0, sizeof(dfix->buffer));
+
+ pt_config_init(config);
+
+ config->begin = dfix->buffer;
+ config->end = dfix->buffer + sizeof(dfix->buffer);
+
+ errcode = pt_encoder_init(&dfix->encoder, config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_qry_decoder_init(&dfix->decoder, config);
+ ptu_int_eq(errcode, 0);
+
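+	/* Start from a recognizable bad IP so stray last-ip updates show up
+	 * in failing checks.
+	 */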
+ dfix->decoder.ip.ip = pt_dfix_bad_ip;
+ dfix->decoder.ip.have_ip = 1;
+ dfix->decoder.ip.suppressed = 0;
+
+ dfix->last_ip = dfix->decoder.ip;
+
+ if (dfix->header)
+ dfix->header(dfix);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result ptu_dfix_fini(struct ptu_decoder_fixture *dfix)
+{
+ pt_qry_decoder_fini(&dfix->decoder);
+ pt_encoder_fini(&dfix->encoder);
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of an empty buffer. */
+static struct ptunit_result
+ptu_dfix_header_sync(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for unconditional indirect branch queries.
+ */
+static struct ptunit_result
+ptu_dfix_header_indir(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ pt_encode_pad(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for unconditional indirect branch queries including a PSB.
+ */
+static struct ptunit_result
+ptu_dfix_header_indir_psb(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+	/* The PSB must be empty since the tests won't skip status events.
+ * On the other hand, we do need to provide an address since tests
+ * may want to update last-ip, which requires a last-ip, of course.
+ */
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_psb(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48);
+ pt_encode_psbend(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for conditional branch queries.
+ */
+static struct ptunit_result
+ptu_dfix_header_cond(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+	/* The PSB must be empty since the tests won't skip status events.
+ * On the other hand, we do need to provide an address since tests
+ * may want to update last-ip, which requires a last-ip, of course.
+ */
+ pt_encode_pad(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_psb(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_pad(encoder);
+ pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48);
+ pt_encode_psbend(encoder);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_pad(encoder);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for event queries.
+ */
+static struct ptunit_result
+ptu_dfix_header_event(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+ pt_encode_pad(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0x1000);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+/* Synchronize the decoder at the beginning of a buffer containing packets that
+ * should be skipped for event queries including a PSB.
+ */
+static struct ptunit_result
+ptu_dfix_header_event_psb(struct ptu_decoder_fixture *dfix)
+{
+ struct pt_query_decoder *decoder = &dfix->decoder;
+ struct pt_encoder *encoder = &dfix->encoder;
+
+	/* The PSB must be empty since the tests won't skip status events.
+ * On the other hand, we do need to provide an address since tests
+ * may want to update last-ip, which requires a last-ip, of course.
+ */
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0);
+ pt_encode_psb(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+ pt_encode_tsc(encoder, 0x1000);
+ pt_encode_fup(encoder, pt_dfix_sext_ip, pt_ipc_sext_48);
+ pt_encode_psbend(encoder);
+ pt_encode_mtc(encoder, 1);
+ pt_encode_pad(encoder);
+
+ /* Synchronize the decoder at the beginning of the buffer. */
+ decoder->pos = decoder->config.begin;
+
+ return ptu_passed();
+}
+
+static struct ptu_decoder_fixture dfix_raw;
+static struct ptu_decoder_fixture dfix_empty;
+static struct ptu_decoder_fixture dfix_indir;
+static struct ptu_decoder_fixture dfix_indir_psb;
+static struct ptu_decoder_fixture dfix_cond;
+static struct ptu_decoder_fixture dfix_event;
+static struct ptu_decoder_fixture dfix_event_psb;
+
+static void init_fixtures(void)
+{
+ dfix_raw.init = ptu_dfix_init;
+ dfix_raw.fini = ptu_dfix_fini;
+
+ dfix_empty = dfix_raw;
+ dfix_empty.header = ptu_dfix_header_sync;
+
+ dfix_indir = dfix_raw;
+ dfix_indir.header = ptu_dfix_header_indir;
+
+ dfix_indir_psb = dfix_raw;
+ dfix_indir_psb.header = ptu_dfix_header_indir_psb;
+
+ dfix_cond = dfix_raw;
+ dfix_cond.header = ptu_dfix_header_cond;
+
+ dfix_event = dfix_raw;
+ dfix_event.header = ptu_dfix_header_event;
+
+ dfix_event_psb = dfix_raw;
+ dfix_event_psb.header = ptu_dfix_header_event_psb;
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ init_fixtures();
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, indir_not_synced, dfix_raw);
+ ptu_run_f(suite, cond_not_synced, dfix_raw);
+ ptu_run_f(suite, event_not_synced, dfix_raw);
+
+ ptu_run_f(suite, sync_backward, dfix_raw);
+ ptu_run_f(suite, sync_backward_empty_end, dfix_raw);
+ ptu_run_f(suite, sync_backward_empty_mid, dfix_raw);
+ ptu_run_f(suite, sync_backward_empty_begin, dfix_raw);
+ ptu_run_f(suite, decode_sync_backward, dfix_raw);
+
+ ptu_run_f(suite, indir_null, dfix_empty);
+ ptu_run_f(suite, indir_empty, dfix_empty);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir, dfix_empty, pt_ipc_full);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir_tnt, dfix_empty, pt_ipc_full);
+ ptu_run_f(suite, indir_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_tnt_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_empty);
+ ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_empty);
+
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_16);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_32);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_update_48);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir, dfix_indir, pt_ipc_full);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_16);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_32);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_update_48);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir_tnt, dfix_indir, pt_ipc_full);
+ ptu_run_f(suite, indir_cutoff_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_tnt_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_indir);
+ ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_indir);
+
+ ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir, dfix_indir_psb, pt_ipc_full);
+ ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_suppressed);
+ ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_sext_48);
+ ptu_run_fp(suite, indir_tnt, dfix_indir_psb, pt_ipc_full);
+ ptu_run_f(suite, indir_cutoff_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_tnt_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_tip_pge_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_tip_pgd_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_fup_tip_fail, dfix_indir_psb);
+ ptu_run_f(suite, indir_skip_fup_tip_pgd_fail, dfix_indir_psb);
+
+ ptu_run_f(suite, cond_null, dfix_empty);
+ ptu_run_f(suite, cond_empty, dfix_empty);
+ ptu_run_f(suite, cond, dfix_empty);
+ ptu_run_f(suite, cond_skip_tip_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_tip_pge_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_tip_pgd_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_fup_tip_fail, dfix_empty);
+ ptu_run_f(suite, cond_skip_fup_tip_pgd_fail, dfix_empty);
+
+ ptu_run_f(suite, cond, dfix_cond);
+ ptu_run_f(suite, cond_skip_tip_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_tip_pge_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_tip_pgd_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_fup_tip_fail, dfix_cond);
+ ptu_run_f(suite, cond_skip_fup_tip_pgd_fail, dfix_cond);
+
+ ptu_run_f(suite, event_null, dfix_empty);
+ ptu_run_f(suite, event_bad_size, dfix_empty);
+ ptu_run_f(suite, event_small_size, dfix_empty);
+ ptu_run_f(suite, event_big_size, dfix_empty);
+ ptu_run_f(suite, event_empty, dfix_empty);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_enabled, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_enabled_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_disabled, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_disabled_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_suppressed,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_16,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_32,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_update_48,
+ 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_async_disabled, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_empty);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_empty);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_empty);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_async_branch, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_empty);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_empty);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_empty);
+ ptu_run_fp(suite, event_paging, dfix_empty, 0, 0);
+ ptu_run_fp(suite, event_paging, dfix_empty, pt_pl_pip_nr, 0);
+ ptu_run_f(suite, event_paging_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_async_paging, dfix_empty, 0, 0);
+ ptu_run_fp(suite, event_async_paging, dfix_empty, pt_pl_pip_nr, 0);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_empty, 0, 0);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_empty,
+ pt_pl_pip_nr, 0);
+ ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_overflow_fup, dfix_empty, pt_ipc_full, 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty,
+ pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_16,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_32,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_update_48,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_sext_48,
+ 0);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_empty, pt_ipc_full,
+ 0);
+ ptu_run_f(suite, event_overflow_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_stop, dfix_empty, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_suppressed,
+ 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_sext_48, 0);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_empty, pt_ipc_full, 0);
+ ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_suppressed, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_update_16, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_update_32, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty,
+ pt_ipc_update_48, 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, pt_ipc_sext_48,
+ 0);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_empty, pt_ipc_full,
+ 0);
+ ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_empty);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_suppressed,
+ pt_mob_tsx_intx, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_16, 0, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_32,
+ pt_mob_tsx_intx, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_update_48,
+ pt_mob_tsx_intx, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_sext_48, 0, 0);
+ ptu_run_fp(suite, event_tsx_fup, dfix_empty, pt_ipc_full, 0, 0);
+ ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, event_tsx_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, event_skip_tip_fail, dfix_empty);
+ ptu_run_f(suite, event_skip_tnt_8_fail, dfix_empty);
+ ptu_run_f(suite, event_skip_tnt_64_fail, dfix_empty);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_event, dfix_empty, pt_ipc_full);
+ ptu_run_f(suite, sync_event_cutoff_fail, dfix_empty);
+ ptu_run_f(suite, sync_event_incomplete_fail, dfix_empty);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_empty, pt_ipc_full);
+ ptu_run_f(suite, sync_ovf_event_cutoff_fail, dfix_empty);
+
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event, pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_enabled_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event, pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_disabled_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_event);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_event);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_event);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_event);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_event);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_event);
+ ptu_run_fp(suite, event_paging, dfix_event, 0, 0x1000);
+ ptu_run_fp(suite, event_paging, dfix_event, pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_paging_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_async_paging, dfix_event, 0, 0x1000);
+ ptu_run_fp(suite, event_async_paging, dfix_event, pt_pl_pip_nr, 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event, 0, 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event,
+ pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_fup, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_overflow_tip_pge, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_overflow_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_stop, dfix_event, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event,
+ pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_event);
+ ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_event);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_suppressed, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_16,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_32, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_update_48, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_sext_48,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event, pt_ipc_full,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_event);
+ ptu_run_f(suite, event_tsx_cutoff_fail, dfix_event);
+ ptu_run_f(suite, event_skip_tip_fail, dfix_event);
+ ptu_run_f(suite, event_skip_tnt_8_fail, dfix_event);
+ ptu_run_f(suite, event_skip_tnt_64_fail, dfix_event);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_event, dfix_event, pt_ipc_full);
+ ptu_run_f(suite, sync_event_cutoff_fail, dfix_event);
+ ptu_run_f(suite, sync_event_incomplete_fail, dfix_event);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_suppressed);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_16);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_32);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_update_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_sext_48);
+ ptu_run_fp(suite, sync_ovf_event, dfix_event, pt_ipc_full);
+ ptu_run_f(suite, sync_ovf_event_cutoff_fail, dfix_event);
+
+ ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_enabled, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_enabled_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_suppressed,
+ 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_disabled, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_disabled_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_update_16, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_update_32, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_update_48, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_async_disabled, dfix_event_psb,
+ pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_async_disabled_suppressed_fail, dfix_event_psb);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_a, dfix_event_psb);
+ ptu_run_f(suite, event_async_disabled_cutoff_fail_b, dfix_event_psb);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_16,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_32,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_update_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_async_branch, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_async_branch_suppressed_fail, dfix_event_psb);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_a, dfix_event_psb);
+ ptu_run_f(suite, event_async_branch_cutoff_fail_b, dfix_event_psb);
+ ptu_run_fp(suite, event_paging, dfix_event_psb, 0, 0x1000);
+ ptu_run_fp(suite, event_paging, dfix_event_psb, pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_paging_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_async_paging, dfix_event_psb, 0, 0x1000);
+ ptu_run_fp(suite, event_async_paging, dfix_event_psb, pt_pl_pip_nr,
+ 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event_psb, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_async_paging_suppressed, dfix_event_psb,
+ pt_pl_pip_nr, 0x1000);
+ ptu_run_f(suite, event_async_paging_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_overflow_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_stop, dfix_event_psb, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb,
+ pt_ipc_suppressed, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, pt_ipc_sext_48,
+ 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip, dfix_event_psb, pt_ipc_full,
+ 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event_psb,
+ pt_ipc_sext_48, 0x1000);
+ ptu_run_fp(suite, event_exec_mode_tip_pge, dfix_event_psb,
+ pt_ipc_full, 0x1000);
+ ptu_run_f(suite, event_exec_mode_tip_pge_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_exec_mode_cutoff_fail, dfix_event_psb);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_suppressed, 0,
+ 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_sext_48,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_fp(suite, event_tsx_fup, dfix_event_psb, pt_ipc_full,
+ pt_mob_tsx_intx, 0x1000);
+ ptu_run_f(suite, event_tsx_fup_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_tsx_cutoff_fail, dfix_event_psb);
+ ptu_run_f(suite, event_skip_tip_fail, dfix_event_psb);
+ ptu_run_f(suite, event_skip_tnt_8_fail, dfix_event_psb);
+ ptu_run_f(suite, event_skip_tnt_64_fail, dfix_event_psb);
+
+ ptu_run_f(suite, time_null_fail, dfix_empty);
+ ptu_run_f(suite, time_initial, dfix_empty);
+ ptu_run_f(suite, time, dfix_empty);
+
+ ptu_run_f(suite, cbr_null, dfix_empty);
+ ptu_run_f(suite, cbr_initial, dfix_empty);
+ ptu_run_f(suite, cbr, dfix_empty);
+
+ ptu_run_f(suite, indir_cyc_cutoff, dfix_empty);
+ ptu_run_f(suite, cond_cyc_cutoff, dfix_empty);
+ ptu_run_f(suite, event_cyc_cutoff, dfix_empty);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-retstack.c b/contrib/processor-trace/libipt/test/src/ptunit-retstack.c
new file mode 100644
index 0000000000000..743eee7b127d0
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-retstack.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_retstack.h"
+
+#include "intel-pt.h"
+
+
+static struct ptunit_result init(void)
+{
+ struct pt_retstack retstack;
+ int status;
+
+ memset(&retstack, 0xcd, sizeof(retstack));
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_retstack_init(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_push(&retstack, 0x42ull);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(ip, 0x42ull);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_empty(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ ip = 0x42ull;
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, -pte_retstack_empty);
+ ptu_uint_eq(ip, 0x42ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_null(void)
+{
+ uint64_t ip;
+ int status;
+
+ ip = 0x42ull;
+ status = pt_retstack_pop(NULL, &ip);
+ ptu_int_eq(status, -pte_invalid);
+ ptu_uint_eq(ip, 0x42ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pop(void)
+{
+ struct pt_retstack retstack;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_push(&retstack, 0x42ull);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_pop(&retstack, NULL);
+ ptu_int_eq(status, 0);
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pop_empty(void)
+{
+ struct pt_retstack retstack;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ status = pt_retstack_pop(&retstack, NULL);
+ ptu_int_eq(status, -pte_retstack_empty);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result pop_null(void)
+{
+ int status;
+
+ status = pt_retstack_pop(NULL, NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result full(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip, idx;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ for (idx = 0; idx < pt_retstack_size; ++idx) {
+ status = pt_retstack_push(&retstack, idx);
+ ptu_int_eq(status, 0);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
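+	/* Entries pop in LIFO order: the last address pushed comes out
+	 * first.
+	 */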
+ for (idx = pt_retstack_size; idx > 0;) {
+ idx -= 1;
+
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(ip, idx);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result overflow(void)
+{
+ struct pt_retstack retstack;
+ uint64_t ip, idx;
+ int status;
+
+ pt_retstack_init(&retstack);
+
+ for (idx = 0; idx <= pt_retstack_size; ++idx) {
+ status = pt_retstack_push(&retstack, idx);
+ ptu_int_eq(status, 0);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_eq(status, 0);
+
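+	/* The extra push drops the oldest entry (0); pops yield
+	 * pt_retstack_size down to 1.
+	 */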
+ for (idx = pt_retstack_size; idx > 0; --idx) {
+ status = pt_retstack_pop(&retstack, &ip);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(ip, idx);
+ }
+
+ status = pt_retstack_is_empty(&retstack);
+ ptu_int_ne(status, 0);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run(suite, init_null);
+ ptu_run(suite, query);
+ ptu_run(suite, query_empty);
+ ptu_run(suite, query_null);
+ ptu_run(suite, pop);
+ ptu_run(suite, pop_empty);
+ ptu_run(suite, pop_null);
+ ptu_run(suite, full);
+ ptu_run(suite, overflow);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-section-file.c b/contrib/processor-trace/libipt/test/src/ptunit-section-file.c
new file mode 100644
index 0000000000000..753de13a3d211
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-section-file.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_section.h"
+#include "pt_section_file.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+
+/* This is a variation of ptunit-section.c.
+ *
+ * We provide pt_section_map() et al. that are normally provided by mmap-based
+ * section implementations. Our implementation falls back to file-based
+ * sections so we're able to test them.
+ *
+ * The actual test is in ptunit-section.c.
+ */
+
+/* The file status used for detecting changes to a file between unmap and map.
+ *
+ * In our case, the changes always affect the size of the file.
+ */
+struct pt_file_status {
+ /* The size in bytes. */
+ long size;
+};
+
+int pt_section_mk_status(void **pstatus, uint64_t *psize, const char *filename)
+{
+ struct pt_file_status *status;
+ FILE *file;
+ long size;
+ int errcode;
+
+ if (!pstatus || !psize)
+ return -pte_internal;
+
+ file = fopen(filename, "rb");
+ if (!file)
+ return -pte_bad_image;
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode) {
+ errcode = -pte_bad_image;
+ goto out_file;
+ }
+
+ size = ftell(file);
+ if (size < 0) {
+ errcode = -pte_bad_image;
+ goto out_file;
+ }
+
+ status = malloc(sizeof(*status));
+ if (!status) {
+ errcode = -pte_nomem;
+ goto out_file;
+ }
+
+ status->size = size;
+
+ *pstatus = status;
+ *psize = (uint64_t) size;
+
+ errcode = 0;
+
+out_file:
+ fclose(file);
+ return errcode;
+}
+
+static int pt_section_map_success(struct pt_section *section)
+{
+ uint16_t mcount;
+ int errcode, status;
+
+ if (!section)
+ return -pte_internal;
+
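+	/* A zero result means the 16-bit map count wrapped around. */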
+ mcount = section->mcount + 1;
+ if (!mcount) {
+ (void) pt_section_unlock(section);
+ return -pte_overflow;
+ }
+
+ section->mcount = mcount;
+
+ errcode = pt_section_unlock(section);
+ if (errcode < 0)
+ return errcode;
+
+ status = pt_section_on_map(section);
+ if (status < 0) {
+ (void) pt_section_unmap(section);
+ return status;
+ }
+
+ return 0;
+}
+
+int pt_section_map(struct pt_section *section)
+{
+ struct pt_file_status *status;
+ const char *filename;
+ uint16_t mcount;
+ FILE *file;
+ long size;
+ int errcode;
+
+ if (!section)
+ return -pte_internal;
+
+ errcode = pt_section_lock(section);
+ if (errcode < 0)
+ return errcode;
+
+ mcount = section->mcount;
+ if (mcount)
+ return pt_section_map_success(section);
+
+ if (section->mapping)
+ goto out_unlock;
+
+ filename = section->filename;
+ if (!filename)
+ goto out_unlock;
+
+ status = section->status;
+ if (!status)
+ goto out_unlock;
+
+ errcode = -pte_bad_image;
+ file = fopen(filename, "rb");
+ if (!file)
+ goto out_unlock;
+
+ errcode = fseek(file, 0, SEEK_END);
+ if (errcode) {
+ errcode = -pte_bad_image;
+ goto out_file;
+ }
+
+ errcode = -pte_bad_image;
+ size = ftell(file);
+ if (size < 0)
+ goto out_file;
+
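+	/* A size change means the file was modified between unmap and map. */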
+ if (size != status->size)
+ goto out_file;
+
+ /* We need to keep the file open on success. It will be closed when
+ * the section is unmapped.
+ */
+ errcode = pt_sec_file_map(section, file);
+ if (!errcode)
+ return pt_section_map_success(section);
+
+out_file:
+ fclose(file);
+
+out_unlock:
+ (void) pt_section_unlock(section);
+ return errcode;
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-section.c b/contrib/processor-trace/libipt/test/src/ptunit-section.c
new file mode 100644
index 0000000000000..058bf853589ed
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-section.c
@@ -0,0 +1,1396 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit_threads.h"
+#include "ptunit_mkfile.h"
+
+#include "pt_section.h"
+#include "pt_block_cache.h"
+
+#include "intel-pt.h"
+
+#include <stdlib.h>
+#include <stdio.h>
+
+
+
+struct pt_image_section_cache {
+ int map;
+};
+
+extern int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section);
+extern int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t size);
+
+int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
+ struct pt_section *section)
+{
+ if (!iscache)
+ return -pte_internal;
+
+ if (iscache->map <= 0)
+ return iscache->map;
+
+ /* Avoid recursion. */
+ iscache->map = 0;
+
+ return pt_section_map_share(section);
+}
+
+int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
+ struct pt_section *section, uint64_t size)
+{
+ uint64_t memsize;
+ int errcode;
+
+ if (!iscache)
+ return -pte_internal;
+
+ if (iscache->map <= 0)
+ return iscache->map;
+
+ /* Avoid recursion. */
+ iscache->map = 0;
+
+ errcode = pt_section_memsize(section, &memsize);
+ if (errcode < 0)
+ return errcode;
+
+ if (size != memsize)
+ return -pte_internal;
+
+ return pt_section_map_share(section);
+}
+
+struct pt_block_cache *pt_bcache_alloc(uint64_t nentries)
+{
+ struct pt_block_cache *bcache;
+
+ if (!nentries || (UINT32_MAX < nentries))
+ return NULL;
+
+ /* The cache is not really used by tests. It suffices to allocate only
+ * the cache struct with the single default entry.
+ *
+ * We still set the number of entries to the requested size.
+ */
+ bcache = malloc(sizeof(*bcache));
+ if (bcache)
+ bcache->nentries = (uint32_t) nentries;
+
+ return bcache;
+}
+
+void pt_bcache_free(struct pt_block_cache *bcache)
+{
+ free(bcache);
+}
+
+/* A test fixture providing a temporary file and an initially NULL section. */
+struct section_fixture {
+ /* Threading support. */
+ struct ptunit_thrd_fixture thrd;
+
+ /* A temporary file name. */
+ char *name;
+
+ /* That file opened for writing. */
+ FILE *file;
+
+ /* The section. */
+ struct pt_section *section;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct section_fixture *);
+ struct ptunit_result (*fini)(struct section_fixture *);
+};
+
+enum {
+#if defined(FEATURE_THREADS)
+
+ num_threads = 4,
+
+#endif /* defined(FEATURE_THREADS) */
+
+ num_work = 0x4000
+};
+
+static struct ptunit_result sfix_write_aux(struct section_fixture *sfix,
+ const uint8_t *buffer, size_t size)
+{
+ size_t written;
+
+ written = fwrite(buffer, 1, size, sfix->file);
+ ptu_uint_eq(written, size);
+
+ fflush(sfix->file);
+
+ return ptu_passed();
+}
+
+#define sfix_write(sfix, buffer) \
+ ptu_check(sfix_write_aux, sfix, buffer, sizeof(buffer))
+
+static struct ptunit_result create(struct section_fixture *sfix)
+{
+ const char *name;
+ uint8_t bytes[] = { 0xcc, 0xcc, 0xcc, 0xcc, 0xcc };
+ uint64_t offset, size;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ name = pt_section_filename(sfix->section);
+ ptu_str_eq(name, sfix->name);
+
+ offset = pt_section_offset(sfix->section);
+ ptu_uint_eq(offset, 0x1ull);
+
+ size = pt_section_size(sfix->section);
+ ptu_uint_eq(size, 0x3ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result create_bad_offset(struct section_fixture *sfix)
+{
+ sfix->section = pt_mk_section(sfix->name, 0x10ull, 0x0ull);
+ ptu_null(sfix->section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result create_truncated(struct section_fixture *sfix)
+{
+ const char *name;
+ uint8_t bytes[] = { 0xcc, 0xcc, 0xcc, 0xcc, 0xcc };
+ uint64_t offset, size;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, UINT64_MAX);
+ ptu_ptr(sfix->section);
+
+ name = pt_section_filename(sfix->section);
+ ptu_str_eq(name, sfix->name);
+
+ offset = pt_section_offset(sfix->section);
+ ptu_uint_eq(offset, 0x1ull);
+
+ size = pt_section_size(sfix->section);
+ ptu_uint_eq(size, sizeof(bytes) - 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result create_empty(struct section_fixture *sfix)
+{
+ sfix->section = pt_mk_section(sfix->name, 0x0ull, 0x10ull);
+ ptu_null(sfix->section);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result filename_null(void)
+{
+ const char *name;
+
+ name = pt_section_filename(NULL);
+ ptu_null(name);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result size_null(void)
+{
+ uint64_t size;
+
+ size = pt_section_size(NULL);
+ ptu_uint_eq(size, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_null(struct section_fixture *sfix)
+{
+ uint64_t size;
+ int errcode;
+
+ errcode = pt_section_memsize(NULL, &size);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_memsize(sfix->section, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_memsize(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result offset_null(void)
+{
+ uint64_t offset;
+
+ offset = pt_section_offset(NULL);
+ ptu_uint_eq(offset, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result get_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_get(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result put_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_put(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section section;
+ int errcode;
+
+ errcode = pt_section_attach(NULL, &iscache);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_attach(&section, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_attach(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result detach_null(void)
+{
+ struct pt_image_section_cache iscache;
+ struct pt_section section;
+ int errcode;
+
+ errcode = pt_section_detach(NULL, &iscache);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(&section, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_map(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unmap_null(void)
+{
+ int errcode;
+
+ errcode = pt_section_unmap(NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cache_null(void)
+{
+ struct pt_block_cache *bcache;
+
+ bcache = pt_section_bcache(NULL);
+ ptu_null(bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result get_overflow(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->ucount = UINT16_MAX;
+
+ errcode = pt_section_get(sfix->section);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ sfix->section->ucount = 1;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_overflow(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->acount = UINT16_MAX;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ sfix->section->acount = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_bad_ucount(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->acount = 2;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, -pte_internal);
+
+ sfix->section->acount = 0;
+
+ return ptu_passed();
+}
+
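+/* Modifying the file after the section was created is detected when the
+ * section is mapped.
+ */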
+static struct ptunit_result map_change(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix_write(sfix, bytes);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_bad_image);
+
+ return ptu_passed();
+}
+
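+/* Dropping the last use of a section that is still mapped is an internal
+ * error; the section must be unmapped first.
+ */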
+static struct ptunit_result map_put(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_put(sfix->section);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result unmap_nomap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, -pte_nomap);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_overflow(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->mcount = UINT16_MAX;
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ sfix->section->mcount = 0;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result get_put(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_get(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_get(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_put(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_put(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
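+/* Each attachment requires a use of its own; the test bumps the use count
+ * before attaching twice and restores it afterwards.
+ */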
+static struct ptunit_result attach_detach(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->ucount += 2;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ sfix->section->ucount -= 2;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_bad_iscache(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache, bad;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ sfix->section->ucount += 2;
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_attach(sfix->section, &bad);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ sfix->section->ucount -= 2;
+
+ return ptu_passed();
+}
+
+static struct ptunit_result detach_bad_iscache(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache, bad;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &bad);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result map_unmap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
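+/* For the attach tests, iscache.map selects the status the section sees
+ * when it is mapped while attached: zero for plain success, a negative
+ * error code to fail the map, a positive value to exercise the overflow
+ * path.
+ */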
+static struct ptunit_result attach_map(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ iscache.map = 0;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(sfix->section->mcount, 2);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_bad_map(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ iscache.map = -pte_eos;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_eos);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result attach_map_overflow(struct section_fixture *sfix)
+{
+ struct pt_image_section_cache iscache;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ iscache.map = 1;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_attach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ sfix->section->mcount = UINT16_MAX - 1;
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, -pte_overflow);
+
+ errcode = pt_section_detach(sfix->section, &iscache);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
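+/* Reads are relative to the section: offset 0x0 reads the byte at the
+ * section's file offset.
+ */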
+static struct ptunit_result read(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[1]);
+ ptu_uint_eq(buffer[1], bytes[2]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_null(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, NULL, 1, 0x0ull);
+ ptu_int_eq(status, -pte_internal);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_read(NULL, buffer, 1, 0x0ull);
+ ptu_int_eq(status, -pte_internal);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_offset(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x1ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[2]);
+ ptu_uint_eq(buffer[1], bytes[3]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_truncated(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x2ull);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(buffer[0], bytes[3]);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_from_truncated(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x2ull, 0x10ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x1ull);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(buffer[0], bytes[3]);
+ ptu_uint_eq(buffer[1], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomem(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 1, 0x3ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_overflow(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 1,
+ 0xffffffffffff0000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_overflow_32bit(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 1,
+ 0xff00000000ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_nomap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 }, buffer[] = { 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_read(sfix->section, buffer, 1, 0x0ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result read_unmap_map(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int status;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[1]);
+ ptu_uint_eq(buffer[1], bytes[2]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ memset(buffer, 0xcc, sizeof(buffer));
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, -pte_nomap);
+ ptu_uint_eq(buffer[0], 0xcc);
+ ptu_uint_eq(buffer[1], 0xcc);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_map(sfix->section);
+ ptu_int_eq(status, 0);
+
+ status = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ ptu_int_eq(status, 2);
+ ptu_uint_eq(buffer[0], bytes[1]);
+ ptu_uint_eq(buffer[1], bytes[2]);
+ ptu_uint_eq(buffer[2], 0xcc);
+
+ status = pt_section_unmap(sfix->section);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
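+/* A stress worker that repeatedly acquires, maps, reads, unmaps, and
+ * releases the shared section, checking the bytes it reads each time.
+ */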
+static int worker_read(void *arg)
+{
+ struct section_fixture *sfix;
+ int it, errcode;
+
+ sfix = arg;
+ if (!sfix)
+ return -pte_internal;
+
+ for (it = 0; it < num_work; ++it) {
+ uint8_t buffer[] = { 0xcc, 0xcc, 0xcc };
+ int read;
+
+ errcode = pt_section_get(sfix->section);
+ if (errcode < 0)
+ return errcode;
+
+ errcode = pt_section_map(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+
+ read = pt_section_read(sfix->section, buffer, 2, 0x0ull);
+ if (read < 0)
+ goto out_unmap;
+
+ errcode = -pte_invalid;
+ if ((read != 2) || (buffer[0] != 0x2) || (buffer[1] != 0x4))
+ goto out_unmap;
+
+ errcode = pt_section_unmap(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+
+ errcode = pt_section_put(sfix->section);
+ if (errcode < 0)
+ return errcode;
+ }
+
+ return 0;
+
+out_unmap:
+ (void) pt_section_unmap(sfix->section);
+
+out_put:
+ (void) pt_section_put(sfix->section);
+ return errcode;
+}
+
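+/* A stress worker that repeatedly maps the shared section and requests its
+ * block cache, racing the cache allocation against the other workers.
+ */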
+static int worker_bcache(void *arg)
+{
+ struct section_fixture *sfix;
+ int it, errcode;
+
+ sfix = arg;
+ if (!sfix)
+ return -pte_internal;
+
+ errcode = pt_section_get(sfix->section);
+ if (errcode < 0)
+ return errcode;
+
+ for (it = 0; it < num_work; ++it) {
+ struct pt_block_cache *bcache;
+
+ errcode = pt_section_map(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+
+ errcode = pt_section_request_bcache(sfix->section);
+ if (errcode < 0)
+ goto out_unmap;
+
+ bcache = pt_section_bcache(sfix->section);
+ if (!bcache) {
+ errcode = -pte_nomem;
+ goto out_unmap;
+ }
+
+ errcode = pt_section_unmap(sfix->section);
+ if (errcode < 0)
+ goto out_put;
+ }
+
+ return pt_section_put(sfix->section);
+
+out_unmap:
+ (void) pt_section_unmap(sfix->section);
+
+out_put:
+ (void) pt_section_put(sfix->section);
+ return errcode;
+}
+
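+/* Run the given worker on num_threads threads, where supported, plus once
+ * on the main thread.
+ */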
+static struct ptunit_result stress(struct section_fixture *sfix,
+ int (*worker)(void *))
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+#if defined(FEATURE_THREADS)
+ {
+ int thrd;
+
+ for (thrd = 0; thrd < num_threads; ++thrd)
+ ptu_test(ptunit_thrd_create, &sfix->thrd, worker, sfix);
+ }
+#endif /* defined(FEATURE_THREADS) */
+
+ errcode = worker(sfix);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_no_bcache(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ struct pt_block_cache *bcache;
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ bcache = pt_section_bcache(sfix->section);
+ ptu_null(bcache);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
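+/* The block cache is allocated with one entry per section byte and is gone
+ * again once the section is unmapped.
+ */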
+static struct ptunit_result bcache_alloc_free(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ struct pt_block_cache *bcache;
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ bcache = pt_section_bcache(sfix->section);
+ ptu_ptr(bcache);
+ ptu_uint_eq(bcache->nentries, sfix->section->size);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ bcache = pt_section_bcache(sfix->section);
+ ptu_null(bcache);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_alloc_twice(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result bcache_alloc_nomap(struct section_fixture *sfix)
+{
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_nomap(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(memsize, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_unmap(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(memsize, 0ull);
+
+ return ptu_passed();
+}
+
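+/* Without a block cache, the memory size covers only the file mapping; for
+ * this tiny section that is at most two 4 KiB pages.
+ */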
+static struct ptunit_result memsize_map_nobcache(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ memsize = 0xfefefefefefefefeull;
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_ge(memsize, 0ull);
+ ptu_uint_le(memsize, 0x2000ull);
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result memsize_map_bcache(struct section_fixture *sfix)
+{
+ uint64_t memsize;
+ uint8_t bytes[] = { 0xcc, 0x2, 0x4, 0x6 };
+ int errcode;
+
+ sfix_write(sfix, bytes);
+
+ sfix->section = pt_mk_section(sfix->name, 0x1ull, 0x3ull);
+ ptu_ptr(sfix->section);
+
+ errcode = pt_section_map(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_alloc_bcache(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_section_memsize(sfix->section, &memsize);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_ge(memsize,
+ sfix->section->size * sizeof(struct pt_bcache_entry));
+
+ errcode = pt_section_unmap(sfix->section);
+ ptu_int_eq(errcode, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_init(struct section_fixture *sfix)
+{
+ int errcode;
+
+ sfix->section = NULL;
+ sfix->file = NULL;
+ sfix->name = NULL;
+
+ errcode = ptunit_mkfile(&sfix->file, &sfix->name, "wb");
+ ptu_int_eq(errcode, 0);
+
+ ptu_test(ptunit_thrd_init, &sfix->thrd);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sfix_fini(struct section_fixture *sfix)
+{
+ int thrd;
+
+ ptu_test(ptunit_thrd_fini, &sfix->thrd);
+
+ for (thrd = 0; thrd < sfix->thrd.nthreads; ++thrd)
+ ptu_int_eq(sfix->thrd.result[thrd], 0);
+
+ if (sfix->section) {
+ pt_section_put(sfix->section);
+ sfix->section = NULL;
+ }
+
+ if (sfix->file) {
+ fclose(sfix->file);
+ sfix->file = NULL;
+
+ if (sfix->name)
+ remove(sfix->name);
+ }
+
+ if (sfix->name) {
+ free(sfix->name);
+ sfix->name = NULL;
+ }
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct section_fixture sfix;
+ struct ptunit_suite suite;
+
+ sfix.init = sfix_init;
+ sfix.fini = sfix_fini;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, create, sfix);
+ ptu_run_f(suite, create_bad_offset, sfix);
+ ptu_run_f(suite, create_truncated, sfix);
+ ptu_run_f(suite, create_empty, sfix);
+
+ ptu_run(suite, filename_null);
+ ptu_run(suite, offset_null);
+ ptu_run(suite, size_null);
+ ptu_run(suite, get_null);
+ ptu_run(suite, put_null);
+ ptu_run(suite, attach_null);
+ ptu_run(suite, detach_null);
+ ptu_run(suite, map_null);
+ ptu_run(suite, unmap_null);
+ ptu_run(suite, cache_null);
+
+ ptu_run_f(suite, get_overflow, sfix);
+ ptu_run_f(suite, attach_overflow, sfix);
+ ptu_run_f(suite, attach_bad_ucount, sfix);
+ ptu_run_f(suite, map_change, sfix);
+ ptu_run_f(suite, map_put, sfix);
+ ptu_run_f(suite, unmap_nomap, sfix);
+ ptu_run_f(suite, map_overflow, sfix);
+ ptu_run_f(suite, get_put, sfix);
+ ptu_run_f(suite, attach_detach, sfix);
+ ptu_run_f(suite, attach_bad_iscache, sfix);
+ ptu_run_f(suite, detach_bad_iscache, sfix);
+ ptu_run_f(suite, map_unmap, sfix);
+ ptu_run_f(suite, attach_map, sfix);
+ ptu_run_f(suite, attach_bad_map, sfix);
+ ptu_run_f(suite, attach_map_overflow, sfix);
+ ptu_run_f(suite, read, sfix);
+ ptu_run_f(suite, read_null, sfix);
+ ptu_run_f(suite, read_offset, sfix);
+ ptu_run_f(suite, read_truncated, sfix);
+ ptu_run_f(suite, read_from_truncated, sfix);
+ ptu_run_f(suite, read_nomem, sfix);
+ ptu_run_f(suite, read_overflow, sfix);
+ ptu_run_f(suite, read_overflow_32bit, sfix);
+ ptu_run_f(suite, read_nomap, sfix);
+ ptu_run_f(suite, read_unmap_map, sfix);
+
+ ptu_run_f(suite, init_no_bcache, sfix);
+ ptu_run_f(suite, bcache_alloc_free, sfix);
+ ptu_run_f(suite, bcache_alloc_twice, sfix);
+ ptu_run_f(suite, bcache_alloc_nomap, sfix);
+
+ ptu_run_f(suite, memsize_null, sfix);
+ ptu_run_f(suite, memsize_nomap, sfix);
+ ptu_run_f(suite, memsize_unmap, sfix);
+ ptu_run_f(suite, memsize_map_nobcache, sfix);
+ ptu_run_f(suite, memsize_map_bcache, sfix);
+
+ ptu_run_fp(suite, stress, sfix, worker_bcache);
+ ptu_run_fp(suite, stress, sfix, worker_read);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-sync.c b/contrib/processor-trace/libipt/test/src/ptunit-sync.c
new file mode 100644
index 0000000000000..343f9d92886ca
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-sync.c
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_sync.h"
+#include "pt_opcodes.h"
+
+#include "intel-pt.h"
+
+
+/* A test fixture for sync tests. */
+struct sync_fixture {
+ /* The trace buffer. */
+ uint8_t buffer[1024];
+
+ /* A trace configuration. */
+ struct pt_config config;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct sync_fixture *);
+ struct ptunit_result (*fini)(struct sync_fixture *);
+};
+
+static struct ptunit_result sfix_init(struct sync_fixture *sfix)
+{
+ memset(sfix->buffer, 0xcd, sizeof(sfix->buffer));
+
+ memset(&sfix->config, 0, sizeof(sfix->config));
+ sfix->config.size = sizeof(sfix->config);
+ sfix->config.begin = sfix->buffer;
+ sfix->config.end = sfix->buffer + sizeof(sfix->buffer);
+
+ return ptu_passed();
+}
+
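+/* Encode a complete PSB packet at pos: the opcode and extension bytes
+ * followed by pt_psb_repeat_count hi/lo byte pairs, ptps_psb bytes in
+ * total.
+ */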
+static void sfix_encode_psb(uint8_t *pos)
+{
+ int i;
+
+ *pos++ = pt_opc_psb;
+ *pos++ = pt_ext_psb;
+
+ for (i = 0; i < pt_psb_repeat_count; ++i) {
+ *pos++ = pt_psb_hi;
+ *pos++ = pt_psb_lo;
+ }
+}
+
+
+static struct ptunit_result sync_fwd_null(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_forward(NULL, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_forward(&sync, NULL, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_null(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_backward(NULL, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_backward(&sync, NULL, &sfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_sync_backward(&sync, sfix->config.begin, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_empty(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix->config.end = sfix->config.begin;
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_empty(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix->config.end = sfix->config.begin;
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_none(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_none(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_here(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.begin);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_here(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.end - ptps_psb);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin + 0x23);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.begin + 0x23);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin + 0x23);
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, 0);
+ ptu_ptr_eq(sync, sfix->config.begin + 0x23);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_fwd_past(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin + ptps_psb,
+ &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_past(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+
+ errcode = pt_sync_backward(&sync, sfix->config.end - ptps_psb,
+ &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
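+/* Cut off the first and the last byte of a PSB at either end of the
+ * buffer; neither fragment may be reported as a sync point.
+ */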
+static struct ptunit_result sync_fwd_cutoff(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+ sfix->config.begin += 1;
+ sfix->config.end -= 1;
+
+ errcode = pt_sync_forward(&sync, sfix->config.begin, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result sync_bwd_cutoff(struct sync_fixture *sfix)
+{
+ const uint8_t *sync;
+ int errcode;
+
+ sfix_encode_psb(sfix->config.begin);
+ sfix_encode_psb(sfix->config.end - ptps_psb);
+ sfix->config.begin += 1;
+ sfix->config.end -= 1;
+
+ errcode = pt_sync_backward(&sync, sfix->config.end, &sfix->config);
+ ptu_int_eq(errcode, -pte_eos);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct sync_fixture sfix;
+ struct ptunit_suite suite;
+
+ sfix.init = sfix_init;
+ sfix.fini = NULL;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run_f(suite, sync_fwd_null, sfix);
+ ptu_run_f(suite, sync_bwd_null, sfix);
+
+ ptu_run_f(suite, sync_fwd_empty, sfix);
+ ptu_run_f(suite, sync_bwd_empty, sfix);
+
+ ptu_run_f(suite, sync_fwd_none, sfix);
+ ptu_run_f(suite, sync_bwd_none, sfix);
+
+ ptu_run_f(suite, sync_fwd_here, sfix);
+ ptu_run_f(suite, sync_bwd_here, sfix);
+
+ ptu_run_f(suite, sync_fwd, sfix);
+ ptu_run_f(suite, sync_bwd, sfix);
+
+ ptu_run_f(suite, sync_fwd_past, sfix);
+ ptu_run_f(suite, sync_bwd_past, sfix);
+
+ ptu_run_f(suite, sync_fwd_cutoff, sfix);
+ ptu_run_f(suite, sync_bwd_cutoff, sfix);
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-time.c b/contrib/processor-trace/libipt/test/src/ptunit-time.c
new file mode 100644
index 0000000000000..5beb623c20bc3
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-time.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2014-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "pt_time.h"
+
+#include "intel-pt.h"
+
+#include "ptunit.h"
+
+
+/* A time unit test fixture. */
+struct time_fixture {
+ /* The configuration to use. */
+ struct pt_config config;
+
+ /* The calibration to use. */
+ struct pt_time_cal tcal;
+
+ /* The time struct to update. */
+ struct pt_time time;
+
+ /* The test fixture initialization and finalization functions. */
+ struct ptunit_result (*init)(struct time_fixture *);
+ struct ptunit_result (*fini)(struct time_fixture *);
+};
+
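+/* Initialize the fixture with artificial CPUID leaf 0x15 values and MTC
+ * frequency, and preset the calibration to a factor of two in the fcr
+ * fixed-point format.
+ */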
+static struct ptunit_result tfix_init(struct time_fixture *tfix)
+{
+ memset(&tfix->config, 0, sizeof(tfix->config));
+ tfix->config.size = sizeof(tfix->config);
+ tfix->config.cpuid_0x15_eax = 2;
+ tfix->config.cpuid_0x15_ebx = 1;
+ tfix->config.mtc_freq = 4;
+
+ pt_tcal_init(&tfix->tcal);
+ pt_tcal_set_fcr(&tfix->tcal, 0x2ull << pt_tcal_fcr_shr);
+
+ pt_time_init(&tfix->time);
+
+ return ptu_passed();
+}
+
+
+static struct ptunit_result tsc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_tsc packet;
+ int errcode;
+
+ errcode = pt_time_update_tsc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_tsc(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cbr packet;
+ int errcode;
+
+ errcode = pt_time_update_cbr(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_cbr(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tma_null(struct time_fixture *tfix)
+{
+ struct pt_packet_tma packet;
+ int errcode;
+
+ errcode = pt_time_update_tma(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_tma(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_tma(&tfix->time, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result mtc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_mtc packet;
+ int errcode;
+
+ errcode = pt_time_update_mtc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_mtc(&tfix->time, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_mtc(&tfix->time, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cyc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cyc packet;
+ int errcode;
+
+ errcode = pt_time_update_cyc(NULL, &packet, &tfix->config, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_cyc(&tfix->time, NULL, &tfix->config, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_update_cyc(&tfix->time, &packet, NULL, 0ull);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_tsc_null(struct time_fixture *tfix)
+{
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_time_query_tsc(NULL, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_tsc_none(struct time_fixture *tfix)
+{
+ uint64_t tsc;
+ int errcode;
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_cbr_null(struct time_fixture *tfix)
+{
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_time_query_cbr(NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_time_query_cbr(&cbr, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_cbr_none(struct time_fixture *tfix)
+{
+ uint32_t cbr;
+ int errcode;
+
+ errcode = pt_time_query_cbr(&cbr, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_cbr);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tcal_cbr_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cbr packet;
+ int errcode;
+
+ errcode = pt_tcal_update_cbr(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tcal_mtc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_mtc packet;
+ int errcode;
+
+ errcode = pt_tcal_update_mtc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_tcal_update_mtc(&tfix->tcal, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_tcal_update_mtc(&tfix->tcal, &packet, NULL);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tcal_cyc_null(struct time_fixture *tfix)
+{
+ struct pt_packet_cyc packet;
+ int errcode;
+
+ errcode = pt_tcal_update_cyc(NULL, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ errcode = pt_tcal_update_cyc(&tfix->tcal, NULL, &tfix->config);
+ ptu_int_eq(errcode, -pte_internal);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result tsc(struct time_fixture *tfix)
+{
+ struct pt_packet_tsc packet;
+ uint64_t tsc;
+ uint32_t lost_mtc, lost_cyc;
+ int errcode;
+
+ packet.tsc = 0xdedededeull;
+
+ errcode = pt_time_update_tsc(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_tsc(&tsc, &lost_mtc, &lost_cyc, &tfix->time);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(tsc, 0xdedededeull);
+ ptu_uint_eq(lost_mtc, 0);
+ ptu_uint_eq(lost_cyc, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cbr(struct time_fixture *tfix)
+{
+ struct pt_packet_cbr packet;
+ uint32_t cbr;
+ int errcode;
+
+ packet.ratio = 0x38;
+
+ errcode = pt_time_update_cbr(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_cbr(&cbr, &tfix->time);
+ ptu_int_eq(errcode, 0);
+
+ ptu_uint_eq(cbr, 0x38);
+
+ return ptu_passed();
+}
+
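+/* With no preceding TSC, a TMA update is rejected as out of context. */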
+static struct ptunit_result tma(struct time_fixture *tfix)
+{
+ struct pt_packet_tma packet;
+ int errcode;
+
+ packet.ctc = 0xdc;
+ packet.fc = 0xf;
+
+ errcode = pt_time_update_tma(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, -pte_bad_context);
+
+ return ptu_passed();
+}
+
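+/* MTC and CYC updates succeed on their own but cannot yield an absolute
+ * time without a prior TSC to anchor them.
+ */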
+static struct ptunit_result mtc(struct time_fixture *tfix)
+{
+ struct pt_packet_mtc packet;
+ uint64_t tsc;
+ int errcode;
+
+ packet.ctc = 0xdc;
+
+ errcode = pt_time_update_mtc(&tfix->time, &packet, &tfix->config);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result cyc(struct time_fixture *tfix)
+{
+ struct pt_packet_cyc packet;
+ uint64_t fcr, tsc;
+ int errcode;
+
+ errcode = pt_tcal_fcr(&fcr, &tfix->tcal);
+ ptu_int_eq(errcode, 0);
+
+ packet.value = 0xdc;
+
+ errcode = pt_time_update_cyc(&tfix->time, &packet, &tfix->config, fcr);
+ ptu_int_eq(errcode, 0);
+
+ errcode = pt_time_query_tsc(&tsc, NULL, NULL, &tfix->time);
+ ptu_int_eq(errcode, -pte_no_time);
+
+ return ptu_passed();
+}
+
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+ struct time_fixture tfix;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ tfix.init = tfix_init;
+ tfix.fini = NULL;
+
+ ptu_run_f(suite, tsc_null, tfix);
+ ptu_run_f(suite, cbr_null, tfix);
+ ptu_run_f(suite, tma_null, tfix);
+ ptu_run_f(suite, mtc_null, tfix);
+ ptu_run_f(suite, cyc_null, tfix);
+
+ ptu_run_f(suite, query_tsc_null, tfix);
+ ptu_run_f(suite, query_tsc_none, tfix);
+ ptu_run_f(suite, query_cbr_null, tfix);
+ ptu_run_f(suite, query_cbr_none, tfix);
+
+ ptu_run_f(suite, tcal_cbr_null, tfix);
+ ptu_run_f(suite, tcal_mtc_null, tfix);
+ ptu_run_f(suite, tcal_cyc_null, tfix);
+
+ ptu_run_f(suite, tsc, tfix);
+ ptu_run_f(suite, cbr, tfix);
+ ptu_run_f(suite, tma, tfix);
+ ptu_run_f(suite, mtc, tfix);
+ ptu_run_f(suite, cyc, tfix);
+
+ /* The bulk is covered in ptt tests. */
+
+ return ptunit_report(&suite);
+}
diff --git a/contrib/processor-trace/libipt/test/src/ptunit-tnt_cache.c b/contrib/processor-trace/libipt/test/src/ptunit-tnt_cache.c
new file mode 100644
index 0000000000000..56631ca04f4f1
--- /dev/null
+++ b/contrib/processor-trace/libipt/test/src/ptunit-tnt_cache.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2013-2018, Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * * Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * * Neither the name of Intel Corporation nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ptunit.h"
+
+#include "pt_tnt_cache.h"
+
+#include "intel-pt.h"
+
+#include <string.h>
+
+
+static struct ptunit_result init(void)
+{
+ struct pt_tnt_cache tnt_cache;
+
+ memset(&tnt_cache, 0xcd, sizeof(tnt_cache));
+
+ pt_tnt_cache_init(&tnt_cache);
+
+ ptu_uint_eq(tnt_cache.tnt, 0ull);
+ ptu_uint_eq(tnt_cache.index, 0ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result init_null(void)
+{
+ pt_tnt_cache_init(NULL);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_initial(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ pt_tnt_cache_init(&tnt_cache);
+
+ status = pt_tnt_cache_is_empty(&tnt_cache);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_no(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.index = 1ull;
+
+ status = pt_tnt_cache_is_empty(&tnt_cache);
+ ptu_int_eq(status, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_yes(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.index = 0ull;
+
+ status = pt_tnt_cache_is_empty(&tnt_cache);
+ ptu_int_eq(status, 1);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result is_empty_null(void)
+{
+ int status;
+
+ status = pt_tnt_cache_is_empty(NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
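+/* The index is a one-hot cursor marking the next branch bit to consume
+ * from tnt; a query returns that bit and advances the cursor.
+ */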
+static struct ptunit_result query_taken(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.tnt = 1ull;
+ tnt_cache.index = 1ull;
+
+ status = pt_tnt_cache_query(&tnt_cache);
+ ptu_int_eq(status, 1);
+ ptu_uint_eq(tnt_cache.index, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_not_taken(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.tnt = 0ull;
+ tnt_cache.index = 1ull;
+
+ status = pt_tnt_cache_query(&tnt_cache);
+ ptu_int_eq(status, 0);
+ ptu_uint_eq(tnt_cache.index, 0);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_empty(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int status;
+
+ tnt_cache.index = 0ull;
+
+ status = pt_tnt_cache_query(&tnt_cache);
+ ptu_int_eq(status, -pte_bad_query);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result query_null(void)
+{
+ int status;
+
+ status = pt_tnt_cache_query(NULL);
+ ptu_int_eq(status, -pte_invalid);
+
+ return ptu_passed();
+}
+
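+/* An update loads the packet payload into tnt and puts the cursor at the
+ * payload's most significant branch bit, as given by bit_size.
+ */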
+static struct ptunit_result update_tnt(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ struct pt_packet_tnt packet;
+ int errcode;
+
+ pt_tnt_cache_init(&tnt_cache);
+
+ packet.bit_size = 4ull;
+ packet.payload = 8ull;
+
+ errcode = pt_tnt_cache_update_tnt(&tnt_cache, &packet, NULL);
+ ptu_int_eq(errcode, 0);
+ ptu_uint_eq(tnt_cache.tnt, 8ull);
+ ptu_uint_eq(tnt_cache.index, 1ull << 3);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt_not_empty(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ struct pt_packet_tnt packet;
+ int errcode;
+
+ tnt_cache.tnt = 42ull;
+ tnt_cache.index = 12ull;
+
+ errcode = pt_tnt_cache_update_tnt(&tnt_cache, &packet, NULL);
+ ptu_int_eq(errcode, -pte_bad_context);
+ ptu_uint_eq(tnt_cache.tnt, 42ull);
+ ptu_uint_eq(tnt_cache.index, 12ull);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt_null_tnt(void)
+{
+ struct pt_packet_tnt packet;
+ int errcode;
+
+ errcode = pt_tnt_cache_update_tnt(NULL, &packet, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+
+ return ptu_passed();
+}
+
+static struct ptunit_result update_tnt_null_packet(void)
+{
+ struct pt_tnt_cache tnt_cache;
+ int errcode;
+
+ tnt_cache.tnt = 42ull;
+ tnt_cache.index = 12ull;
+
+ errcode = pt_tnt_cache_update_tnt(&tnt_cache, NULL, NULL);
+ ptu_int_eq(errcode, -pte_invalid);
+ ptu_uint_eq(tnt_cache.tnt, 42ull);
+ ptu_uint_eq(tnt_cache.index, 12ull);
+
+ return ptu_passed();
+}
+
+int main(int argc, char **argv)
+{
+ struct ptunit_suite suite;
+
+ suite = ptunit_mk_suite(argc, argv);
+
+ ptu_run(suite, init);
+ ptu_run(suite, init_null);
+ ptu_run(suite, is_empty_initial);
+ ptu_run(suite, is_empty_no);
+ ptu_run(suite, is_empty_yes);
+ ptu_run(suite, is_empty_null);
+ ptu_run(suite, query_taken);
+ ptu_run(suite, query_not_taken);
+ ptu_run(suite, query_empty);
+ ptu_run(suite, query_null);
+ ptu_run(suite, update_tnt);
+ ptu_run(suite, update_tnt_not_empty);
+ ptu_run(suite, update_tnt_null_tnt);
+ ptu_run(suite, update_tnt_null_packet);
+
+ return ptunit_report(&suite);
+}