1 // SPDX-License-Identifier: GPL-2.0-only
3 * intel_pt_decoder.c: Intel Processor Trace support
4 * Copyright (c) 2013-2014, Intel Corporation.
16 #include <linux/compiler.h>
17 #include <linux/zalloc.h>
20 #include "../auxtrace.h"
22 #include "intel-pt-insn-decoder.h"
23 #include "intel-pt-pkt-decoder.h"
24 #include "intel-pt-decoder.h"
25 #include "intel-pt-log.h"
27 #define INTEL_PT_BLK_SIZE 1024
29 #define BIT63 (((uint64_t)1 << 63))
31 #define INTEL_PT_RETURN 1
33 /* Maximum number of loops with no packets consumed i.e. stuck in a loop */
34 #define INTEL_PT_MAX_LOOPS 10000
37 struct intel_pt_blk *prev;
38 uint64_t ip[INTEL_PT_BLK_SIZE];
/*
 * Call/return stack used for RET compression, kept as a linked list of
 * fixed-size blocks (struct intel_pt_blk holds INTEL_PT_BLK_SIZE ips).
 */
41 struct intel_pt_stack {
	/* Top-most block of saved return addresses */
42 struct intel_pt_blk *blk;
	/* Cached free block — released via zfree() in intel_pt_free_stack() */
43 struct intel_pt_blk *spare;
/*
 * Decoder packet-level state machine states.
 * NOTE(review): some enumerators appear to be elided in this fragment
 * (e.g. a NO_IP / TNT / TIP / FUP value referenced elsewhere) — verify
 * against a pristine copy.
 */
47 enum intel_pt_pkt_state {
	/* No PSB seen yet — decoder is not synchronized */
48 INTEL_PT_STATE_NO_PSB,
	/* Resynchronizing after an error */
50 INTEL_PT_STATE_ERR_RESYNC,
	/* Normal decoding */
51 INTEL_PT_STATE_IN_SYNC,
	/* Continuing to consume a TNT packet's taken/not-taken bits */
52 INTEL_PT_STATE_TNT_CONT,
	/* Processing a TIP.PGD (trace disable) packet */
55 INTEL_PT_STATE_TIP_PGD,
	/* FUP seen that will not be followed by a TIP */
57 INTEL_PT_STATE_FUP_NO_TIP,
/*
 * intel_pt_sample_time() - whether the timestamp should be sampled in the
 * given packet state.  The switch partitions states into two groups;
 * presumably the first group returns true and the second false — the
 * return statements are not visible in this fragment, verify upstream.
 */
60 static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
63 case INTEL_PT_STATE_NO_PSB:
64 case INTEL_PT_STATE_NO_IP:
65 case INTEL_PT_STATE_ERR_RESYNC:
66 case INTEL_PT_STATE_IN_SYNC:
67 case INTEL_PT_STATE_TNT_CONT:
	/* States below are mid-walk; timestamp sampling differs here */
69 case INTEL_PT_STATE_TNT:
70 case INTEL_PT_STATE_TIP:
71 case INTEL_PT_STATE_TIP_PGD:
72 case INTEL_PT_STATE_FUP:
73 case INTEL_PT_STATE_FUP_NO_TIP:
80 #ifdef INTEL_PT_STRICT
81 #define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB
82 #define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB
83 #define INTEL_PT_STATE_ERR3 INTEL_PT_STATE_NO_PSB
84 #define INTEL_PT_STATE_ERR4 INTEL_PT_STATE_NO_PSB
86 #define INTEL_PT_STATE_ERR1 (decoder->pkt_state)
87 #define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_IP
88 #define INTEL_PT_STATE_ERR3 INTEL_PT_STATE_ERR_RESYNC
89 #define INTEL_PT_STATE_ERR4 INTEL_PT_STATE_IN_SYNC
/*
 * struct intel_pt_decoder - complete state of the Intel PT decoder.
 * NOTE(review): a number of members referenced by the functions below
 * (e.g. data, len, pos, pkt_step, ip, last_ip, timestamp, cbr, pge,
 * have_cyc, overflow) are not visible in this fragment — lines appear
 * to have been elided; verify against a pristine copy.
 */
92 struct intel_pt_decoder {
	/* Caller-supplied callbacks (see intel_pt_decoder_new()) */
93 int (*get_trace)(struct intel_pt_buffer *buffer, void *data);
94 int (*walk_insn)(struct intel_pt_insn *intel_pt_insn,
95 uint64_t *insn_cnt_ptr, uint64_t *ip, uint64_t to_ip,
96 uint64_t max_insn_cnt, void *data);
97 bool (*pgd_ip)(uint64_t ip, void *data);
98 int (*lookahead)(void *data, intel_pt_lookahead_cb_t cb, void *cb_data);
	/* Current decoder output state returned to the caller */
100 struct intel_pt_state state;
	/* Current trace buffer being decoded */
101 const unsigned char *buf;
103 bool return_compression;
112 enum intel_pt_param_flags flags;
	/* Timestamp tracking (TSC / reference / per-buffer / last sampled) */
118 uint64_t tsc_timestamp;
119 uint64_t ref_timestamp;
120 uint64_t buf_timestamp;
121 uint64_t sample_timestamp;
123 uint64_t ctc_timestamp;
126 uint64_t cyc_ref_timestamp;
	/* TSC-to-CTC ratio from capabilities; mult set when n divides evenly */
128 uint32_t tsc_ctc_ratio_n;
129 uint32_t tsc_ctc_ratio_d;
130 uint32_t tsc_ctc_mult;
	/* Mask of CTC bits below the MTC period (see intel_pt_decoder_new) */
132 uint32_t ctc_rem_mask;
	/* RET-compression call stack */
134 struct intel_pt_stack stack;
135 enum intel_pt_pkt_state pkt_state;
136 enum intel_pt_pkt_ctx pkt_ctx;
137 enum intel_pt_pkt_ctx prev_pkt_ctx;
138 enum intel_pt_blk_type blk_type;
	/* Current packet and pending TNT bits */
140 struct intel_pt_pkt packet;
141 struct intel_pt_pkt tnt;
144 int last_packet_type;
	/* Core-to-bus ratio (CBR) and cycle-to-TSC conversion factors */
146 unsigned int cbr_seen;
147 unsigned int max_non_turbo_ratio;
148 double max_non_turbo_ratio_fp;
149 double cbr_cyc_to_tsc;
150 double calc_cyc_to_tsc;
151 bool have_calc_cyc_to_tsc;
153 unsigned int insn_bytes;
	/* Sampling-period bookkeeping */
155 enum intel_pt_period_type period_type;
156 uint64_t tot_insn_cnt;
157 uint64_t period_insn_cnt;
158 uint64_t period_mask;
159 uint64_t period_ticks;
160 uint64_t last_masked_timestamp;
161 uint64_t tot_cyc_cnt;
162 uint64_t sample_tot_cyc_cnt;
163 uint64_t base_cyc_cnt;
164 uint64_t cyc_cnt_timestamp;
166 bool continuous_period;
	/* Deferred FUP-bound event payloads/flags */
168 bool set_fup_tx_flags;
175 unsigned int fup_tx_flags;
176 unsigned int tx_flags;
177 uint64_t fup_ptw_payload;
178 uint64_t fup_mwait_payload;
179 uint64_t fup_pwre_payload;
180 uint64_t cbr_payload;
181 uint64_t timestamp_insn_cnt;
182 uint64_t sample_insn_cnt;
	/* Saved continuation buffer for packets split across buffers */
187 const unsigned char *next_buf;
189 unsigned char temp_buf[INTEL_PT_PKT_MAX_SZ];
/*
 * intel_pt_lower_power_of_2() - round down to the nearest power of 2.
 * @x: value to round
 *
 * Return: the largest power of 2 that is <= @x, or 0 if @x is 0.
 */
static uint64_t intel_pt_lower_power_of_2(uint64_t x)
{
	int i;

	/* Guard: without this the loop below never terminates for x == 0 */
	if (!x)
		return 0;

	/* Count shifts needed to reduce x to 1, then restore that power */
	for (i = 0; x != 1; i++)
		x >>= 1;

	return x << i;
}
/*
 * intel_pt_setup_period() - for a ticks-based period, round the requested
 * period down to a power of 2 so a cheap mask can detect period boundaries.
 */
202 static void intel_pt_setup_period(struct intel_pt_decoder *decoder)
204 if (decoder->period_type == INTEL_PT_PERIOD_TICKS) {
207 period = intel_pt_lower_power_of_2(decoder->period);
	/* period is a power of 2, so ~(period - 1) masks off sub-period bits */
208 decoder->period_mask = ~(period - 1);
209 decoder->period_ticks = period;
/*
 * multdiv() - compute (t * n) / d while avoiding overflow of the
 * intermediate product: t is split into its quotient and remainder by d
 * so each partial product stays small.
 * @t: 64-bit multiplicand
 * @n: numerator of the ratio
 * @d: denominator of the ratio
 *
 * Return: (t * n) / d, or 0 when @d is 0 (division by zero is UB).
 */
static uint64_t multdiv(uint64_t t, uint32_t n, uint32_t d)
{
	if (!d)
		return 0;

	return (t / d) * n + ((t % d) * n) / d;
}
/*
 * intel_pt_decoder_new() - allocate and configure a decoder from @params.
 * get_trace and walk_insn callbacks are mandatory.
 * NOTE(review): the NULL-return paths and final `return decoder;` are not
 * visible in this fragment — verify against a pristine copy.
 */
220 struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
222 struct intel_pt_decoder *decoder;
224 if (!params->get_trace || !params->walk_insn)
227 decoder = zalloc(sizeof(struct intel_pt_decoder))
231 decoder->get_trace = params->get_trace;
232 decoder->walk_insn = params->walk_insn;
233 decoder->pgd_ip = params->pgd_ip;
234 decoder->lookahead = params->lookahead;
235 decoder->data = params->data;
236 decoder->return_compression = params->return_compression;
237 decoder->branch_enable = params->branch_enable;
239 decoder->flags = params->flags;
241 decoder->period = params->period;
242 decoder->period_type = params->period_type;
	/* Same value kept both as integer and as double for cycle maths */
244 decoder->max_non_turbo_ratio = params->max_non_turbo_ratio;
245 decoder->max_non_turbo_ratio_fp = params->max_non_turbo_ratio;
247 intel_pt_setup_period(decoder);
249 decoder->mtc_shift = params->mtc_period;
250 decoder->ctc_rem_mask = (1 << decoder->mtc_shift) - 1;
252 decoder->tsc_ctc_ratio_n = params->tsc_ctc_ratio_n;
253 decoder->tsc_ctc_ratio_d = params->tsc_ctc_ratio_d;
	/* A zero numerator makes the ratio meaningless — disable it */
255 if (!decoder->tsc_ctc_ratio_n)
256 decoder->tsc_ctc_ratio_d = 0;
	/* Use a simple multiplier when the ratio divides evenly */
258 if (decoder->tsc_ctc_ratio_d) {
259 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
260 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
261 decoder->tsc_ctc_ratio_d;
265 * A TSC packet can slip past MTC packets so that the timestamp appears
266 * to go backwards. One estimate is that can be up to about 40 CPU
267 * cycles, which is certainly less than 0x1000 TSC ticks, but accept
268 * slippage an order of magnitude more to be on the safe side.
270 decoder->tsc_slip = 0x10000;
272 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
273 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
274 intel_pt_log("timestamp: tsc_ctc_ratio_d %u\n", decoder->tsc_ctc_ratio_d);
275 intel_pt_log("timestamp: tsc_ctc_mult %u\n", decoder->tsc_ctc_mult);
276 intel_pt_log("timestamp: tsc_slip %#x\n", decoder->tsc_slip);
/*
 * intel_pt_pop_blk() - unlink the top block from the stack.
 * NOTE(review): the visible lines never free or cache the unlinked block
 * — presumably elided lines hand it to stack->spare or free() it; verify,
 * otherwise this leaks.
 */
281 static void intel_pt_pop_blk(struct intel_pt_stack *stack)
283 struct intel_pt_blk *blk = stack->blk;
285 stack->blk = blk->prev;
/*
 * intel_pt_pop() - pop a return address off the call stack.
 * When the current block is exhausted, the block is popped and the
 * position reset to the end of the (now top) block.
 */
292 static uint64_t intel_pt_pop(struct intel_pt_stack *stack)
297 intel_pt_pop_blk(stack);
300 stack->pos = INTEL_PT_BLK_SIZE;
302 return stack->blk->ip[--stack->pos];
/*
 * intel_pt_alloc_blk() - push a fresh block onto the stack.
 * NOTE(review): no NULL check on malloc() and no reuse of stack->spare is
 * visible here — presumably in elided lines; verify.
 */
305 static int intel_pt_alloc_blk(struct intel_pt_stack *stack)
307 struct intel_pt_blk *blk;
313 blk = malloc(sizeof(struct intel_pt_blk));
	/* New block becomes the top; old top is reachable via ->prev */
318 blk->prev = stack->blk;
/*
 * intel_pt_push() - push a return address, growing the stack by one block
 * when the current block is full (or there is none yet).
 */
324 static int intel_pt_push(struct intel_pt_stack *stack, uint64_t ip)
328 if (!stack->blk || stack->pos == INTEL_PT_BLK_SIZE) {
329 err = intel_pt_alloc_blk(stack);
334 stack->blk->ip[stack->pos++] = ip;
/* Pop (and release) blocks — presumably looped until the stack is empty. */
338 static void intel_pt_clear_stack(struct intel_pt_stack *stack)
341 intel_pt_pop_blk(stack);
/* Release all stack blocks including the cached spare block. */
345 static void intel_pt_free_stack(struct intel_pt_stack *stack)
347 intel_pt_clear_stack(stack);
349 zfree(&stack->spare);
/*
 * intel_pt_decoder_free() - public destructor matching intel_pt_decoder_new().
 * NOTE(review): freeing of the decoder itself is not visible here — verify.
 */
352 void intel_pt_decoder_free(struct intel_pt_decoder *decoder)
354 intel_pt_free_stack(&decoder->stack);
/*
 * intel_pt_ext_err() - map an internal (errno-style) error code to one of
 * the external INTEL_PT_ERR_* codes used by intel_pt__strerror().
 * NOTE(review): the switch/case labels are not visible in this fragment —
 * only the mapped return values; verify against a pristine copy.
 */
358 static int intel_pt_ext_err(int code)
362 return INTEL_PT_ERR_NOMEM;
364 return INTEL_PT_ERR_INTERN;
366 return INTEL_PT_ERR_BADPKT;
368 return INTEL_PT_ERR_NODATA;
370 return INTEL_PT_ERR_NOINSN;
372 return INTEL_PT_ERR_MISMAT;
374 return INTEL_PT_ERR_OVR;
376 return INTEL_PT_ERR_LOST;
378 return INTEL_PT_ERR_NELOOP;
	/* Fallback for unrecognized codes */
380 return INTEL_PT_ERR_UNK;
/* Human-readable messages indexed by INTEL_PT_ERR_* (designated init). */
384 static const char *intel_pt_err_msgs[] = {
385 [INTEL_PT_ERR_NOMEM] = "Memory allocation failed",
386 [INTEL_PT_ERR_INTERN] = "Internal error",
387 [INTEL_PT_ERR_BADPKT] = "Bad packet",
388 [INTEL_PT_ERR_NODATA] = "No more data",
389 [INTEL_PT_ERR_NOINSN] = "Failed to get instruction",
390 [INTEL_PT_ERR_MISMAT] = "Trace doesn't match instruction",
391 [INTEL_PT_ERR_OVR] = "Overflow packet",
392 [INTEL_PT_ERR_LOST] = "Lost trace data",
393 [INTEL_PT_ERR_UNK] = "Unknown error!",
394 [INTEL_PT_ERR_NELOOP] = "Never-ending loop",
/*
 * intel_pt__strerror() - copy the message for @code into @buf.
 * Out-of-range codes are clamped to INTEL_PT_ERR_UNK; strlcpy guarantees
 * NUL-termination for buflen > 0.
 */
397 int intel_pt__strerror(int code, char *buf, size_t buflen)
399 if (code < 1 || code >= INTEL_PT_ERR_MAX)
400 code = INTEL_PT_ERR_UNK;
401 strlcpy(buf, intel_pt_err_msgs[code], buflen);
/*
 * intel_pt_calc_ip() - reconstruct a full IP from an IP-compressed packet.
 * packet->count selects how many payload bytes are present; missing upper
 * bytes are taken from @last_ip (or sign-extended for the 6-byte form).
 * NOTE(review): the case labels and payload-merge halves of each branch
 * are not visible in this fragment — verify.
 */
405 static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
410 switch (packet->count) {
	/* 2 payload bytes: keep upper 48 bits of last_ip */
412 ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
	/* 4 payload bytes: keep upper 32 bits of last_ip */
416 ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
420 ip = packet->payload;
421 /* Sign-extend 6-byte ip */
422 if (ip & (uint64_t)0x800000000000ULL)
423 ip |= (uint64_t)0xffff000000000000ULL;
	/* 6 payload bytes with upper 16 bits from last_ip */
426 ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
	/* Full 8-byte IP */
430 ip = packet->payload;
/* Update last_ip from the current (IP-compressed) packet. */
439 static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
441 decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
442 decoder->have_last_ip = true;
/* Update last_ip from the current packet and make it the current ip. */
445 static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
447 intel_pt_set_last_ip(decoder);
448 decoder->ip = decoder->last_ip;
/* Log the current packet with its length and buffer position. */
451 static void intel_pt_decoder_log_packet(struct intel_pt_decoder *decoder)
453 intel_pt_log_packet(&decoder->packet, decoder->pkt_len, decoder->pos,
/*
 * intel_pt_bug() - "should not happen" handler: log, drop synchronization
 * back to NO_PSB.  Presumably returns an internal error — the return
 * statement is not visible here.
 */
457 static int intel_pt_bug(struct intel_pt_decoder *decoder)
459 intel_pt_log("ERROR: Internal error\n");
460 decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
/* Clear the TSX transaction flags. */
464 static inline void intel_pt_clear_tx_flags(struct intel_pt_decoder *decoder)
466 decoder->tx_flags = 0;
/* Set/clear the in-transaction flag from a MODE.TSX packet payload. */
469 static inline void intel_pt_update_in_tx(struct intel_pt_decoder *decoder)
471 decoder->tx_flags = decoder->packet.payload & INTEL_PT_IN_TX;
/*
 * intel_pt_bad_packet() - handle an undecodable packet: reset TSX/TMA
 * state, step over a single byte, and enter the ERR1 state (unless the
 * decoder is still unsynchronized, in which case no error is raised).
 */
474 static int intel_pt_bad_packet(struct intel_pt_decoder *decoder)
476 intel_pt_clear_tx_flags(decoder);
477 decoder->have_tma = false;
	/* Skip one byte and retry packet decode from there */
478 decoder->pkt_len = 1;
479 decoder->pkt_step = 1;
480 intel_pt_decoder_log_packet(decoder);
481 if (decoder->pkt_state != INTEL_PT_STATE_NO_PSB) {
482 intel_pt_log("ERROR: Bad packet\n");
483 decoder->pkt_state = INTEL_PT_STATE_ERR1;
/* Latch the current timestamp / insn count as the sample values. */
488 static inline void intel_pt_update_sample_time(struct intel_pt_decoder *decoder)
490 decoder->sample_timestamp = decoder->timestamp;
491 decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
/* Reset decode state after a discontinuity — must re-find a PSB. */
494 static void intel_pt_reposition(struct intel_pt_decoder *decoder)
497 decoder->pkt_state = INTEL_PT_STATE_NO_PSB;
498 decoder->timestamp = 0;
499 decoder->have_tma = false;
/*
 * intel_pt_get_data() - fetch the next trace buffer via the get_trace()
 * callback.  A non-consecutive buffer (or explicit @reposition) resets
 * the decoder and takes a new reference timestamp.
 */
502 static int intel_pt_get_data(struct intel_pt_decoder *decoder, bool reposition)
504 struct intel_pt_buffer buffer = { .buf = 0, };
507 decoder->pkt_step = 0;
509 intel_pt_log("Getting more data\n");
510 ret = decoder->get_trace(&buffer, decoder->data);
513 decoder->buf = buffer.buf;
514 decoder->len = buffer.len;
	/* Empty buffer means end of trace */
516 intel_pt_log("No more data\n");
519 decoder->buf_timestamp = buffer.ref_timestamp;
520 if (!buffer.consecutive || reposition) {
521 intel_pt_reposition(decoder);
522 decoder->ref_timestamp = buffer.ref_timestamp;
523 decoder->state.trace_nr = buffer.trace_nr;
524 intel_pt_log("Reference timestamp 0x%" PRIx64 "\n",
525 decoder->ref_timestamp);
/*
 * intel_pt_get_next_data() - like intel_pt_get_data() but first consume
 * any buffer remainder saved by intel_pt_get_split_packet().
 */
532 static int intel_pt_get_next_data(struct intel_pt_decoder *decoder,
535 if (!decoder->next_buf)
536 return intel_pt_get_data(decoder, reposition);
	/* Resume from the saved continuation, then clear it */
538 decoder->buf = decoder->next_buf;
539 decoder->len = decoder->next_len;
540 decoder->next_buf = 0;
541 decoder->next_len = 0;
/*
 * intel_pt_get_split_packet() - decode a packet that straddles two trace
 * buffers: copy the tail of the old buffer plus the head of the new one
 * into temp_buf, decode from there, and save the unconsumed part of the
 * new buffer in next_buf/next_len for intel_pt_get_next_data().
 */
545 static int intel_pt_get_split_packet(struct intel_pt_decoder *decoder)
547 unsigned char *buf = decoder->temp_buf;
548 size_t old_len, len, n;
551 old_len = decoder->len;
553 memcpy(buf, decoder->buf, len);
555 ret = intel_pt_get_data(decoder, false);
	/* Failed to get more data: account for the bytes already consumed */
557 decoder->pos += old_len;
558 return ret < 0 ? ret : -EINVAL;
	/* Top up temp_buf from the new buffer, up to max packet size */
561 n = INTEL_PT_PKT_MAX_SZ - len;
562 if (n > decoder->len)
564 memcpy(buf + len, decoder->buf, n);
567 decoder->prev_pkt_ctx = decoder->pkt_ctx;
568 ret = intel_pt_get_packet(buf, len, &decoder->packet, &decoder->pkt_ctx);
569 if (ret < (int)old_len) {
	/* Packet did not actually span buffers — treat as bad */
570 decoder->next_buf = decoder->buf;
571 decoder->next_len = decoder->len;
573 decoder->len = old_len;
574 return intel_pt_bad_packet(decoder);
	/* Packet consumed (ret - old_len) bytes of the new buffer */
577 decoder->next_buf = decoder->buf + (ret - old_len);
578 decoder->next_len = decoder->len - (ret - old_len);
/* Context handed to lookahead callbacks by intel_pt_pkt_lookahead(). */
586 struct intel_pt_pkt_info {
587 struct intel_pt_decoder *decoder;
	/* Packet under inspection (decoded ahead of the decoder position) */
588 struct intel_pt_pkt packet;
591 int last_packet_type;
/* Lookahead callback: non-zero return stops the walk. */
595 typedef int (*intel_pt_pkt_cb_t)(struct intel_pt_pkt_info *pkt_info);
597 /* Lookahead packets in current buffer */
/*
 * Walks packets ahead of the current decode position without advancing the
 * decoder, invoking @cb for each non-PAD packet.  Local copies of buf/len/
 * pkt_ctx keep the decoder state untouched.
 */
598 static int intel_pt_pkt_lookahead(struct intel_pt_decoder *decoder,
599 intel_pt_pkt_cb_t cb, void *data)
601 struct intel_pt_pkt_info pkt_info;
602 const unsigned char *buf = decoder->buf;
603 enum intel_pt_pkt_ctx pkt_ctx = decoder->pkt_ctx;
604 size_t len = decoder->len;
607 pkt_info.decoder = decoder;
608 pkt_info.pos = decoder->pos;
609 pkt_info.pkt_len = decoder->pkt_step;
610 pkt_info.last_packet_type = decoder->last_packet_type;
611 pkt_info.data = data;
	/* Step over the previously decoded packet */
615 pkt_info.pos += pkt_info.pkt_len;
616 buf += pkt_info.pkt_len;
617 len -= pkt_info.pkt_len;
620 return INTEL_PT_NEED_MORE_BYTES;
622 ret = intel_pt_get_packet(buf, len, &pkt_info.packet,
625 return INTEL_PT_NEED_MORE_BYTES;
629 pkt_info.pkt_len = ret;
630 } while (pkt_info.packet.type == INTEL_PT_PAD);
636 pkt_info.last_packet_type = pkt_info.packet.type;
/* Working state for the CYC-to-TSC ratio estimation lookahead walk. */
640 struct intel_pt_calc_cyc_to_tsc_info {
	/* CTC-derived timestamp base (from TMA) */
644 uint64_t ctc_timestamp;
	/* Last full TSC value seen */
646 uint64_t tsc_timestamp;
	/* CBR-derived expected ratio used as a sanity bound */
651 double cbr_cyc_to_tsc;
655 * MTC provides a 8-bit slice of CTC but the TMA packet only provides the lower
656 * 16 bits of CTC. If mtc_shift > 8 then some of the MTC bits are not in the CTC
657 * provided by the TMA packet. Fix-up the last_mtc calculated from the TMA
658 * packet by copying the missing bits from the current MTC assuming the least
659 * difference between the two, and that the current MTC comes after last_mtc.
/*
 * intel_pt_fixup_last_mtc() - reconstruct MTC bits missing from the TMA CTC.
 * @mtc:       current 8-bit MTC value
 * @mtc_shift: MTC period shift (> 8 means TMA's 16 CTC bits do not cover
 *             all MTC bits)
 * @last_mtc:  in/out: MTC derived from the TMA packet, to be fixed up
 *
 * Copy the missing high bits from the current MTC, assuming the least
 * difference between the two and that the current MTC comes after
 * *last_mtc; borrow one "missing bit" unit when that assumption would
 * otherwise be violated, and clamp back to 8 bits.
 */
static void intel_pt_fixup_last_mtc(uint32_t mtc, int mtc_shift,
				    uint32_t *last_mtc)
{
	uint32_t first_missing_bit = 1U << (16 - mtc_shift);
	uint32_t mask = ~(first_missing_bit - 1);

	*last_mtc |= mtc & mask;
	if (*last_mtc >= mtc) {
		*last_mtc -= first_missing_bit;
		/* MTC is an 8-bit value — keep the result in range */
		*last_mtc &= 0xff;
	}
}
/*
 * intel_pt_calc_cyc_cb() - lookahead callback that estimates TSC ticks per
 * CPU cycle by accumulating CYC counts until the next timing packet (MTC
 * or TSC), then dividing the elapsed TSC by the accumulated cycles.  The
 * result is sanity-checked against the CBR-derived ratio.
 * NOTE(review): case labels, break/return statements and several branches
 * are elided in this fragment — verify against a pristine copy.
 */
674 static int intel_pt_calc_cyc_cb(struct intel_pt_pkt_info *pkt_info)
676 struct intel_pt_decoder *decoder = pkt_info->decoder;
677 struct intel_pt_calc_cyc_to_tsc_info *data = pkt_info->data;
681 uint32_t mtc, mtc_delta, ctc, fc, ctc_rem;
683 switch (pkt_info->packet.type) {
	/* Packets irrelevant to timing — keep walking */
685 case INTEL_PT_TIP_PGE:
690 case INTEL_PT_MODE_EXEC:
691 case INTEL_PT_MODE_TSX:
692 case INTEL_PT_PSBEND:
696 case INTEL_PT_PTWRITE:
697 case INTEL_PT_PTWRITE_IP:
701 case INTEL_PT_BEP_IP:
	/* MTC: advance the CTC-derived timestamp by the MTC delta */
708 mtc = pkt_info->packet.payload;
709 if (decoder->mtc_shift > 8 && data->fixup_last_mtc) {
710 data->fixup_last_mtc = false;
711 intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
	/* MTC is 8-bit, handle wraparound */
714 if (mtc > data->last_mtc)
715 mtc_delta = mtc - data->last_mtc;
717 mtc_delta = mtc + 256 - data->last_mtc;
718 data->ctc_delta += mtc_delta << decoder->mtc_shift;
719 data->last_mtc = mtc;
721 if (decoder->tsc_ctc_mult) {
722 timestamp = data->ctc_timestamp +
723 data->ctc_delta * decoder->tsc_ctc_mult;
725 timestamp = data->ctc_timestamp +
726 multdiv(data->ctc_delta,
727 decoder->tsc_ctc_ratio_n,
728 decoder->tsc_ctc_ratio_d);
731 if (timestamp < data->timestamp)
	/* Only conclude when the preceding packet was not a CYC */
734 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
735 data->timestamp = timestamp;
743 * For now, do not support using TSC packets - refer
744 * intel_pt_calc_cyc_to_tsc().
	/* TSC: reconstruct full value using the current top byte */
748 timestamp = pkt_info->packet.payload |
749 (data->timestamp & (0xffULL << 56));
750 if (data->from_mtc && timestamp < data->timestamp &&
751 data->timestamp - timestamp < decoder->tsc_slip)
753 if (timestamp < data->timestamp)
754 timestamp += (1ULL << 56);
755 if (pkt_info->last_packet_type != INTEL_PT_CYC) {
758 data->tsc_timestamp = timestamp;
759 data->timestamp = timestamp;
	/* TMA: anchor the CTC timestamp; requires a valid TSC:CTC ratio */
768 if (!decoder->tsc_ctc_ratio_d)
771 ctc = pkt_info->packet.payload;
772 fc = pkt_info->packet.count;
773 ctc_rem = ctc & decoder->ctc_rem_mask;
775 data->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
777 data->ctc_timestamp = data->tsc_timestamp - fc;
778 if (decoder->tsc_ctc_mult) {
779 data->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
781 data->ctc_timestamp -=
782 multdiv(ctc_rem, decoder->tsc_ctc_ratio_n,
783 decoder->tsc_ctc_ratio_d);
787 data->have_tma = true;
788 data->fixup_last_mtc = true;
	/* CYC: accumulate cycles */
793 data->cycle_cnt += pkt_info->packet.payload;
	/* CBR: frequency change invalidates the estimate */
797 cbr = pkt_info->packet.payload;
798 if (data->cbr && data->cbr != cbr)
801 data->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
804 case INTEL_PT_TIP_PGD:
805 case INTEL_PT_TRACESTOP:
806 case INTEL_PT_EXSTOP:
807 case INTEL_PT_EXSTOP_IP:
812 case INTEL_PT_BAD: /* Does not happen */
	/* Conclusion: compute and sanity-check the ratio */
817 if (!data->cbr && decoder->cbr) {
818 data->cbr = decoder->cbr;
819 data->cbr_cyc_to_tsc = decoder->cbr_cyc_to_tsc;
821 if (!data->cycle_cnt)
825 cyc_to_tsc = (double)(timestamp - decoder->timestamp) / data->cycle_cnt;
	/* Reject estimates more than 25% above the CBR-based value */
827 if (data->cbr && cyc_to_tsc > data->cbr_cyc_to_tsc &&
828 cyc_to_tsc / data->cbr_cyc_to_tsc > 1.25) {
829 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle too big (c.f. CBR-based value %g), pos " x64_fmt "\n",
830 cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
834 decoder->calc_cyc_to_tsc = cyc_to_tsc;
835 decoder->have_calc_cyc_to_tsc = true;
838 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. CBR-based value %g, pos " x64_fmt "\n",
839 cyc_to_tsc, data->cbr_cyc_to_tsc, pkt_info->pos);
841 intel_pt_log("Timestamp: calculated %g TSC ticks per cycle c.f. unknown CBR-based value, pos " x64_fmt "\n",
842 cyc_to_tsc, pkt_info->pos);
/*
 * intel_pt_calc_cyc_to_tsc() - kick off a lookahead walk (with a snapshot
 * of the decoder's timing state) to estimate TSC ticks per cycle.
 */
848 static void intel_pt_calc_cyc_to_tsc(struct intel_pt_decoder *decoder,
851 struct intel_pt_calc_cyc_to_tsc_info data = {
854 .last_mtc = decoder->last_mtc,
855 .ctc_timestamp = decoder->ctc_timestamp,
856 .ctc_delta = decoder->ctc_delta,
857 .tsc_timestamp = decoder->tsc_timestamp,
858 .timestamp = decoder->timestamp,
859 .have_tma = decoder->have_tma,
860 .fixup_last_mtc = decoder->fixup_last_mtc,
861 .from_mtc = from_mtc,
866 * For now, do not support using TSC packets for at least the reasons:
867 * 1) timing might have stopped
868 * 2) TSC packets within PSB+ can slip against CYC packets
873 intel_pt_pkt_lookahead(decoder, intel_pt_calc_cyc_cb, &data);
/*
 * intel_pt_get_next_packet() - advance past the current packet and decode
 * the next non-PAD packet, fetching more data and handling buffer-split
 * packets as needed.
 */
876 static int intel_pt_get_next_packet(struct intel_pt_decoder *decoder)
880 decoder->last_packet_type = decoder->packet.type;
	/* Step over the previous packet */
883 decoder->pos += decoder->pkt_step;
884 decoder->buf += decoder->pkt_step;
885 decoder->len -= decoder->pkt_step;
888 ret = intel_pt_get_next_data(decoder, false);
893 decoder->prev_pkt_ctx = decoder->pkt_ctx;
894 ret = intel_pt_get_packet(decoder->buf, decoder->len,
895 &decoder->packet, &decoder->pkt_ctx);
	/* On 32-bit, a short buffer may mean a packet split across buffers */
896 if (ret == INTEL_PT_NEED_MORE_BYTES && BITS_PER_LONG == 32 &&
897 decoder->len < INTEL_PT_PKT_MAX_SZ && !decoder->next_buf) {
898 ret = intel_pt_get_split_packet(decoder);
903 return intel_pt_bad_packet(decoder);
905 decoder->pkt_len = ret;
906 decoder->pkt_step = ret;
907 intel_pt_decoder_log_packet(decoder);
908 } while (decoder->packet.type == INTEL_PT_PAD);
/*
 * intel_pt_next_period() - number of ticks until the next ticks-based
 * period boundary, using the power-of-2 period mask.  timestamp_insn_cnt
 * is added as an instruction-count-based approximation of elapsed ticks
 * since the last timestamp.
 */
913 static uint64_t intel_pt_next_period(struct intel_pt_decoder *decoder)
915 uint64_t timestamp, masked_timestamp;
917 timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
918 masked_timestamp = timestamp & decoder->period_mask;
919 if (decoder->continuous_period) {
920 if (masked_timestamp > decoder->last_masked_timestamp)
	/* Not (or no longer) continuous: re-anchor the period boundary */
924 masked_timestamp = timestamp & decoder->period_mask;
925 if (masked_timestamp > decoder->last_masked_timestamp) {
926 decoder->last_masked_timestamp = masked_timestamp;
927 decoder->continuous_period = true;
931 if (masked_timestamp < decoder->last_masked_timestamp)
932 return decoder->period_ticks;
934 return decoder->period_ticks - (timestamp - masked_timestamp);
/*
 * intel_pt_next_sample() - max instructions to walk before the next
 * sample is due (0 presumably meaning "no limit" for NONE/MTC — the
 * return for those cases is not visible here).
 */
937 static uint64_t intel_pt_next_sample(struct intel_pt_decoder *decoder)
939 switch (decoder->period_type) {
940 case INTEL_PT_PERIOD_INSTRUCTIONS:
941 return decoder->period - decoder->period_insn_cnt;
942 case INTEL_PT_PERIOD_TICKS:
943 return intel_pt_next_period(decoder);
944 case INTEL_PT_PERIOD_NONE:
945 case INTEL_PT_PERIOD_MTC:
/*
 * intel_pt_sample_insn() - a sample is due: reset/advance the period
 * bookkeeping for the configured period type and flag an instruction
 * sample in the output state.
 */
951 static void intel_pt_sample_insn(struct intel_pt_decoder *decoder)
953 uint64_t timestamp, masked_timestamp;
955 switch (decoder->period_type) {
956 case INTEL_PT_PERIOD_INSTRUCTIONS:
957 decoder->period_insn_cnt = 0;
959 case INTEL_PT_PERIOD_TICKS:
960 timestamp = decoder->timestamp + decoder->timestamp_insn_cnt;
961 masked_timestamp = timestamp & decoder->period_mask;
962 if (masked_timestamp > decoder->last_masked_timestamp)
963 decoder->last_masked_timestamp = masked_timestamp;
	/* Else: force the next boundary one period later */
965 decoder->last_masked_timestamp += decoder->period_ticks;
967 case INTEL_PT_PERIOD_NONE:
968 case INTEL_PT_PERIOD_MTC:
973 decoder->state.type |= INTEL_PT_INSTRUCTION;
/*
 * intel_pt_walk_insn() - walk instructions via the walk_insn() callback
 * until a branch (or @ip, or the sample limit) is reached, updating
 * instruction counts, the call stack for calls/returns, and detecting
 * never-ending loops.  Returns INTEL_PT_RETURN when a decoder state is
 * ready to be returned to the caller.
 */
976 static int intel_pt_walk_insn(struct intel_pt_decoder *decoder,
977 struct intel_pt_insn *intel_pt_insn, uint64_t ip)
979 uint64_t max_insn_cnt, insn_cnt = 0;
982 if (!decoder->mtc_insn)
983 decoder->mtc_insn = true;
985 max_insn_cnt = intel_pt_next_sample(decoder);
987 err = decoder->walk_insn(intel_pt_insn, &insn_cnt, &decoder->ip, ip,
988 max_insn_cnt, decoder->data);
	/* Account walked instructions in all counters */
990 decoder->tot_insn_cnt += insn_cnt;
991 decoder->timestamp_insn_cnt += insn_cnt;
992 decoder->sample_insn_cnt += insn_cnt;
993 decoder->period_insn_cnt += insn_cnt;
	/* walk_insn() failed to fetch/decode an instruction */
996 decoder->no_progress = 0;
997 decoder->pkt_state = INTEL_PT_STATE_ERR2;
998 intel_pt_log_at("ERROR: Failed to get instruction",
	/* Reached the requested ip */
1005 if (ip && decoder->ip == ip) {
	/* Sample limit reached before a branch */
1010 if (max_insn_cnt && insn_cnt >= max_insn_cnt)
1011 intel_pt_sample_insn(decoder);
1013 if (intel_pt_insn->branch == INTEL_PT_BR_NO_BRANCH) {
1014 decoder->state.type = INTEL_PT_INSTRUCTION;
1015 decoder->state.from_ip = decoder->ip;
1016 decoder->state.to_ip = 0;
1017 decoder->ip += intel_pt_insn->length;
1018 err = INTEL_PT_RETURN;
	/* Maintain the RET-compression call stack */
1022 if (intel_pt_insn->op == INTEL_PT_OP_CALL) {
1023 /* Zero-length calls are excluded */
1024 if (intel_pt_insn->branch != INTEL_PT_BR_UNCONDITIONAL ||
1025 intel_pt_insn->rel) {
1026 err = intel_pt_push(&decoder->stack, decoder->ip +
1027 intel_pt_insn->length);
1031 } else if (intel_pt_insn->op == INTEL_PT_OP_RET) {
1032 decoder->ret_addr = intel_pt_pop(&decoder->stack);
1035 if (intel_pt_insn->branch == INTEL_PT_BR_UNCONDITIONAL) {
1036 int cnt = decoder->no_progress++;
1038 decoder->state.from_ip = decoder->ip;
1039 decoder->ip += intel_pt_insn->length +
1041 decoder->state.to_ip = decoder->ip;
1042 err = INTEL_PT_RETURN;
1045 * Check for being stuck in a loop. This can happen if a
1046 * decoder error results in the decoder erroneously setting the
1047 * ip to an address that is itself in an infinite loop that
1048 * consumes no packets. When that happens, there must be an
1049 * unconditional branch.
1053 decoder->stuck_ip = decoder->state.to_ip;
1054 decoder->stuck_ip_prd = 1;
1055 decoder->stuck_ip_cnt = 1;
1056 } else if (cnt > INTEL_PT_MAX_LOOPS ||
1057 decoder->state.to_ip == decoder->stuck_ip) {
1058 intel_pt_log_at("ERROR: Never-ending loop",
1059 decoder->state.to_ip);
1060 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
	/* Re-sample the stuck ip with a growing period */
1063 } else if (!--decoder->stuck_ip_cnt) {
1064 decoder->stuck_ip_prd += 1;
1065 decoder->stuck_ip_cnt = decoder->stuck_ip_prd;
1066 decoder->stuck_ip = decoder->state.to_ip;
1069 goto out_no_progress;
1072 decoder->no_progress = 0;
	/* Export the decoded instruction to the output state */
1074 decoder->state.insn_op = intel_pt_insn->op;
1075 decoder->state.insn_len = intel_pt_insn->length;
1076 memcpy(decoder->state.insn, intel_pt_insn->buf,
1077 INTEL_PT_INSN_BUF_SZ);
1079 if (decoder->tx_flags & INTEL_PT_IN_TX)
1080 decoder->state.flags |= INTEL_PT_IN_TX;
/*
 * intel_pt_fup_event() - emit an event that was deferred until its FUP
 * (TSX transaction, PTWRITE, MWAIT, PWRE, EXSTOP, block items).  Each
 * branch consumes its set_fup_* flag and fills the output state;
 * presumably returns whether an event was produced — the returns are not
 * visible in this fragment.
 */
1085 static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
1089 if (decoder->set_fup_tx_flags) {
1090 decoder->set_fup_tx_flags = false;
1091 decoder->tx_flags = decoder->fup_tx_flags;
1092 decoder->state.type = INTEL_PT_TRANSACTION;
1093 decoder->state.from_ip = decoder->ip;
1094 decoder->state.to_ip = 0;
1095 decoder->state.flags = decoder->fup_tx_flags;
1098 if (decoder->set_fup_ptw) {
1099 decoder->set_fup_ptw = false;
1100 decoder->state.type = INTEL_PT_PTW;
1101 decoder->state.flags |= INTEL_PT_FUP_IP;
1102 decoder->state.from_ip = decoder->ip;
1103 decoder->state.to_ip = 0;
1104 decoder->state.ptw_payload = decoder->fup_ptw_payload;
1107 if (decoder->set_fup_mwait) {
1108 decoder->set_fup_mwait = false;
1109 decoder->state.type = INTEL_PT_MWAIT_OP;
1110 decoder->state.from_ip = decoder->ip;
1111 decoder->state.to_ip = 0;
1112 decoder->state.mwait_payload = decoder->fup_mwait_payload;
1115 if (decoder->set_fup_pwre) {
1116 decoder->set_fup_pwre = false;
1117 decoder->state.type |= INTEL_PT_PWR_ENTRY;
1118 decoder->state.type &= ~INTEL_PT_BRANCH;
1119 decoder->state.from_ip = decoder->ip;
1120 decoder->state.to_ip = 0;
1121 decoder->state.pwre_payload = decoder->fup_pwre_payload;
1124 if (decoder->set_fup_exstop) {
1125 decoder->set_fup_exstop = false;
1126 decoder->state.type |= INTEL_PT_EX_STOP;
1127 decoder->state.type &= ~INTEL_PT_BRANCH;
1128 decoder->state.flags |= INTEL_PT_FUP_IP;
1129 decoder->state.from_ip = decoder->ip;
1130 decoder->state.to_ip = 0;
1133 if (decoder->set_fup_bep) {
1134 decoder->set_fup_bep = false;
1135 decoder->state.type |= INTEL_PT_BLK_ITEMS;
1136 decoder->state.type &= ~INTEL_PT_BRANCH;
1137 decoder->state.from_ip = decoder->ip;
1138 decoder->state.to_ip = 0;
/*
 * intel_pt_fup_with_nlip() - with the FUP_WITH_NLIP quirk enabled, detect
 * a FUP whose ip is the "next linear instruction pointer" just past an
 * indirect branch (rather than the branch itself).
 */
1144 static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
1145 struct intel_pt_insn *intel_pt_insn,
1146 uint64_t ip, int err)
1148 return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
1149 intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
1150 ip == decoder->ip + intel_pt_insn->length;
/*
 * intel_pt_walk_fup() - walk instructions up to the FUP ip (last_ip),
 * emit any deferred FUP-bound event there; a branch encountered before
 * reaching the FUP ip is an error (resync).
 */
1153 static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
1155 struct intel_pt_insn intel_pt_insn;
1159 ip = decoder->last_ip;
1162 err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
1163 if (err == INTEL_PT_RETURN)
	/* Reached the FUP ip (or NLIP quirk matched) */
1165 if (err == -EAGAIN ||
1166 intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
1167 if (intel_pt_fup_event(decoder))
1171 decoder->set_fup_tx_flags = false;
1175 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1176 intel_pt_log_at("ERROR: Unexpected indirect branch",
1178 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1182 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1183 intel_pt_log_at("ERROR: Unexpected conditional branch",
1185 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1189 intel_pt_bug(decoder);
/*
 * intel_pt_walk_tip() - walk to the branch bound to a TIP / TIP.PGD
 * packet and set the branch's destination from the packet.  Handles
 * address-filtered tracing (pgd_ip) where the branch leaves the filter
 * region, ending trace with INTEL_PT_TRACE_END.
 */
1193 static int intel_pt_walk_tip(struct intel_pt_decoder *decoder)
1195 struct intel_pt_insn intel_pt_insn;
1198 err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
1199 if (err == INTEL_PT_RETURN &&
1201 decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
1202 (decoder->state.type & INTEL_PT_BRANCH) &&
1203 decoder->pgd_ip(decoder->state.to_ip, decoder->data)) {
1204 /* Unconditional branch leaving filter region */
1205 decoder->no_progress = 0;
1206 decoder->pge = false;
1207 decoder->continuous_period = false;
1208 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1209 decoder->state.type |= INTEL_PT_TRACE_END;
1212 if (err == INTEL_PT_RETURN)
1217 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
	/* TIP.PGD: trace is being disabled at this branch */
1218 if (decoder->pkt_state == INTEL_PT_STATE_TIP_PGD) {
1219 decoder->pge = false;
1220 decoder->continuous_period = false;
1221 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1222 decoder->state.from_ip = decoder->ip;
	/* count == 0 means the packet carried no ip (suppressed) */
1223 if (decoder->packet.count == 0) {
1224 decoder->state.to_ip = 0;
1226 decoder->state.to_ip = decoder->last_ip;
1227 decoder->ip = decoder->last_ip;
1229 decoder->state.type |= INTEL_PT_TRACE_END;
	/* Plain TIP: take the indirect branch to the packet ip */
1231 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1232 decoder->state.from_ip = decoder->ip;
1233 if (decoder->packet.count == 0) {
1234 decoder->state.to_ip = 0;
1236 decoder->state.to_ip = decoder->last_ip;
1237 decoder->ip = decoder->last_ip;
1243 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1244 uint64_t to_ip = decoder->ip + intel_pt_insn.length +
1247 if (decoder->pgd_ip &&
1248 decoder->pkt_state == INTEL_PT_STATE_TIP_PGD &&
1249 decoder->pgd_ip(to_ip, decoder->data)) {
1250 /* Conditional branch leaving filter region */
1251 decoder->pge = false;
1252 decoder->continuous_period = false;
1253 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1254 decoder->ip = to_ip;
1255 decoder->state.from_ip = decoder->ip;
1256 decoder->state.to_ip = to_ip;
1257 decoder->state.type |= INTEL_PT_TRACE_END;
1260 intel_pt_log_at("ERROR: Conditional branch when expecting indirect branch",
1262 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1266 return intel_pt_bug(decoder);
/*
 * intel_pt_walk_tnt() - consume the taken/not-taken bits of a TNT packet.
 * Bit 63 of tnt.payload is the next branch decision; the payload is
 * shifted left as bits are consumed.  Compressed RETs are resolved from
 * the decoder's call stack; indirect branches expect a deferred TIP.
 */
1269 static int intel_pt_walk_tnt(struct intel_pt_decoder *decoder)
1271 struct intel_pt_insn intel_pt_insn;
1275 err = intel_pt_walk_insn(decoder, &intel_pt_insn, 0);
1276 if (err == INTEL_PT_RETURN)
1281 if (intel_pt_insn.op == INTEL_PT_OP_RET) {
	/* A RET consuming a TNT bit is only valid with return compression */
1282 if (!decoder->return_compression) {
1283 intel_pt_log_at("ERROR: RET when expecting conditional branch",
1285 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1288 if (!decoder->ret_addr) {
1289 intel_pt_log_at("ERROR: Bad RET compression (stack empty)",
1291 decoder->pkt_state = INTEL_PT_STATE_ERR3;
	/* A compressed RET must correspond to a taken (T) bit */
1294 if (!(decoder->tnt.payload & BIT63)) {
1295 intel_pt_log_at("ERROR: Bad RET compression (TNT=N)",
1297 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1300 decoder->tnt.count -= 1;
1301 if (decoder->tnt.count)
1302 decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
1304 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
1305 decoder->tnt.payload <<= 1;
1306 decoder->state.from_ip = decoder->ip;
1307 decoder->ip = decoder->ret_addr;
1308 decoder->state.to_ip = decoder->ip;
1312 if (intel_pt_insn.branch == INTEL_PT_BR_INDIRECT) {
1313 /* Handle deferred TIPs */
1314 err = intel_pt_get_next_packet(decoder);
1317 if (decoder->packet.type != INTEL_PT_TIP ||
1318 decoder->packet.count == 0) {
1319 intel_pt_log_at("ERROR: Missing deferred TIP for indirect branch",
1321 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1322 decoder->pkt_step = 0;
1325 intel_pt_set_last_ip(decoder);
1326 decoder->state.from_ip = decoder->ip;
1327 decoder->state.to_ip = decoder->last_ip;
1328 decoder->ip = decoder->last_ip;
1332 if (intel_pt_insn.branch == INTEL_PT_BR_CONDITIONAL) {
1333 decoder->tnt.count -= 1;
1334 if (decoder->tnt.count)
1335 decoder->pkt_state = INTEL_PT_STATE_TNT_CONT;
1337 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
	/* Taken: follow the branch target */
1338 if (decoder->tnt.payload & BIT63) {
1339 decoder->tnt.payload <<= 1;
1340 decoder->state.from_ip = decoder->ip;
1341 decoder->ip += intel_pt_insn.length +
1343 decoder->state.to_ip = decoder->ip;
1346 /* Instruction sample for a non-taken branch */
1347 if (decoder->state.type & INTEL_PT_INSTRUCTION) {
1348 decoder->tnt.payload <<= 1;
1349 decoder->state.type = INTEL_PT_INSTRUCTION;
1350 decoder->state.from_ip = decoder->ip;
1351 decoder->state.to_ip = 0;
1352 decoder->ip += intel_pt_insn.length;
	/* Not taken: fall through to the next instruction */
1355 decoder->sample_cyc = false;
1356 decoder->ip += intel_pt_insn.length;
1357 if (!decoder->tnt.count) {
1358 intel_pt_update_sample_time(decoder);
1361 decoder->tnt.payload <<= 1;
1365 return intel_pt_bug(decoder);
/*
 * intel_pt_mode_tsx() - handle a MODE.TSX packet.  The TSX flags take
 * effect at the following FUP; if the FUP is missing, apply them
 * immediately and log an error.
 */
1369 static int intel_pt_mode_tsx(struct intel_pt_decoder *decoder, bool *no_tip)
1371 unsigned int fup_tx_flags;
1374 fup_tx_flags = decoder->packet.payload &
1375 (INTEL_PT_IN_TX | INTEL_PT_ABORT_TX);
1376 err = intel_pt_get_next_packet(decoder);
1379 if (decoder->packet.type == INTEL_PT_FUP) {
1380 decoder->fup_tx_flags = fup_tx_flags;
1381 decoder->set_fup_tx_flags = true;
	/* An abort is bound to a TIP; otherwise suppress the TIP */
1382 if (!(decoder->fup_tx_flags & INTEL_PT_ABORT_TX))
1385 intel_pt_log_at("ERROR: Missing FUP after MODE.TSX",
1387 intel_pt_update_in_tx(decoder);
/*
 * intel_pt_8b_tsc() - expand a 7-byte TSC payload to a full 8-byte value.
 * @timestamp:     7-byte TSC packet payload (top byte zero)
 * @ref_timestamp: estimated full timestamp supplying the top byte
 *
 * The top byte is taken from @ref_timestamp; if the result is more than
 * half the 7-byte range (2^55) away from the reference, adjust by 2^56 in
 * the direction that brings it closest to the reference.
 *
 * Return: the reconstructed 8-byte TSC value.
 */
static uint64_t intel_pt_8b_tsc(uint64_t timestamp, uint64_t ref_timestamp)
{
	timestamp |= (ref_timestamp & (0xffULL << 56));

	if (timestamp < ref_timestamp) {
		if (ref_timestamp - timestamp > (1ULL << 55))
			timestamp += (1ULL << 56);
	} else {
		if (timestamp - ref_timestamp > (1ULL << 55))
			timestamp -= (1ULL << 56);
	}

	return timestamp;
}
/*
 * intel_pt_calc_tsc_timestamp() - update the decoder timestamp from a TSC
 * packet.  The first TSC after a new reference is expanded against the
 * reference; later ones are merged with the current timestamp's top byte,
 * suppressing small backwards slips and detecting 2^56 wraparound.
 */
1407 static void intel_pt_calc_tsc_timestamp(struct intel_pt_decoder *decoder)
1411 decoder->have_tma = false;
1413 if (decoder->ref_timestamp) {
1414 timestamp = intel_pt_8b_tsc(decoder->packet.payload,
1415 decoder->ref_timestamp);
1416 decoder->tsc_timestamp = timestamp;
1417 decoder->timestamp = timestamp;
1418 decoder->ref_timestamp = 0;
1419 decoder->timestamp_insn_cnt = 0;
1420 } else if (decoder->timestamp) {
1421 timestamp = decoder->packet.payload |
1422 (decoder->timestamp & (0xffULL << 56));
1423 decoder->tsc_timestamp = timestamp;
	/* Small backwards movement is TSC-vs-MTC slip — keep old value */
1424 if (timestamp < decoder->timestamp &&
1425 decoder->timestamp - timestamp < decoder->tsc_slip) {
1426 intel_pt_log_to("Suppressing backwards timestamp",
1428 timestamp = decoder->timestamp;
	/* Larger backwards movement means the 56-bit payload wrapped */
1430 if (timestamp < decoder->timestamp) {
1431 intel_pt_log_to("Wraparound timestamp", timestamp);
1432 timestamp += (1ULL << 56);
1433 decoder->tsc_timestamp = timestamp;
1435 decoder->timestamp = timestamp;
1436 decoder->timestamp_insn_cnt = 0;
	/* A TSC right after CYC re-anchors the cycle-to-TSC estimation */
1439 if (decoder->last_packet_type == INTEL_PT_CYC) {
1440 decoder->cyc_ref_timestamp = decoder->timestamp;
1441 decoder->cycle_cnt = 0;
1442 decoder->have_calc_cyc_to_tsc = false;
1443 intel_pt_calc_cyc_to_tsc(decoder, false);
1446 intel_pt_log_to("Setting timestamp", decoder->timestamp);
/*
 * Handle an OVF (overflow) packet: trace data was lost, so drop TSX state,
 * reset the per-timestamp instruction count and switch to resync state.
 */
1449 static int intel_pt_overflow(struct intel_pt_decoder *decoder)
1451 intel_pt_log("ERROR: Buffer overflow\n");
1452 intel_pt_clear_tx_flags(decoder);
1453 decoder->timestamp_insn_cnt = 0;
1454 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
1455 decoder->overflow = true;
/*
 * At a trace-begin (PGE), re-base the estimated cycle count on the current
 * timestamp.  Not needed when real CYC packets are present.
 */
1459 static inline void intel_pt_mtc_cyc_cnt_pge(struct intel_pt_decoder *decoder)
1461 if (decoder->have_cyc)
1464 decoder->cyc_cnt_timestamp = decoder->timestamp;
1465 decoder->base_cyc_cnt = decoder->tot_cyc_cnt;
/*
 * On a CBR change, recompute the TSC-to-cycles conversion factor from the
 * new core-to-bus ratio and re-base the estimated cycle count.
 */
1468 static inline void intel_pt_mtc_cyc_cnt_cbr(struct intel_pt_decoder *decoder)
1470 decoder->tsc_to_cyc = decoder->cbr / decoder->max_non_turbo_ratio_fp;
1473 intel_pt_mtc_cyc_cnt_pge(decoder);
/*
 * Update the estimated total cycle count from elapsed TSC time.
 * Skipped when real CYC packets exist; the estimate is monotonic
 * (only ever moves tot_cyc_cnt forward).
 */
1476 static inline void intel_pt_mtc_cyc_cnt_upd(struct intel_pt_decoder *decoder)
1478 uint64_t tot_cyc_cnt, tsc_delta;
1480 if (decoder->have_cyc)
1483 decoder->sample_cyc = true;
/* Only meaningful while tracing is enabled and time has advanced */
1485 if (!decoder->pge || decoder->timestamp <= decoder->cyc_cnt_timestamp)
1488 tsc_delta = decoder->timestamp - decoder->cyc_cnt_timestamp;
1489 tot_cyc_cnt = tsc_delta * decoder->tsc_to_cyc + decoder->base_cyc_cnt;
1491 if (tot_cyc_cnt > decoder->tot_cyc_cnt)
1492 decoder->tot_cyc_cnt = tot_cyc_cnt;
/*
 * Process a TMA packet: establish the CTC-to-TSC relationship so that
 * subsequent MTC packets can be converted to timestamps.  The CTC
 * timestamp is derived by backing off the fast counter (fc) and the CTC
 * remainder from the last TSC value.
 */
1495 static void intel_pt_calc_tma(struct intel_pt_decoder *decoder)
1497 uint32_t ctc = decoder->packet.payload;
1498 uint32_t fc = decoder->packet.count;
1499 uint32_t ctc_rem = ctc & decoder->ctc_rem_mask;
/* Without the TSC:CTC ratio denominator, TMA cannot be used */
1501 if (!decoder->tsc_ctc_ratio_d)
1504 if (decoder->pge && !decoder->in_psb)
1505 intel_pt_mtc_cyc_cnt_pge(decoder);
1507 intel_pt_mtc_cyc_cnt_upd(decoder);
/* last_mtc is the 8-bit MTC value implied by this CTC */
1509 decoder->last_mtc = (ctc >> decoder->mtc_shift) & 0xff;
1510 decoder->ctc_timestamp = decoder->tsc_timestamp - fc;
1511 if (decoder->tsc_ctc_mult) {
1512 decoder->ctc_timestamp -= ctc_rem * decoder->tsc_ctc_mult;
1514 decoder->ctc_timestamp -= multdiv(ctc_rem,
1515 decoder->tsc_ctc_ratio_n,
1516 decoder->tsc_ctc_ratio_d);
1518 decoder->ctc_delta = 0;
1519 decoder->have_tma = true;
1520 decoder->fixup_last_mtc = true;
1521 intel_pt_log("CTC timestamp " x64_fmt " last MTC %#x CTC rem %#x\n",
1522 decoder->ctc_timestamp, decoder->last_mtc, ctc_rem);
/*
 * Process an MTC packet: convert the 8-bit MTC counter delta into a
 * timestamp relative to the CTC timestamp established by TMA.
 * Requires a preceding TMA (have_tma); backwards results are suppressed.
 */
1525 static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
1528 uint32_t mtc, mtc_delta;
1530 if (!decoder->have_tma)
1533 mtc = decoder->packet.payload;
/* With mtc_shift > 8 the low MTC bits are ambiguous; fix up once per TMA */
1535 if (decoder->mtc_shift > 8 && decoder->fixup_last_mtc) {
1536 decoder->fixup_last_mtc = false;
1537 intel_pt_fixup_last_mtc(mtc, decoder->mtc_shift,
1538 &decoder->last_mtc);
/* MTC is an 8-bit counter, so the delta wraps modulo 256 */
1541 if (mtc > decoder->last_mtc)
1542 mtc_delta = mtc - decoder->last_mtc;
1544 mtc_delta = mtc + 256 - decoder->last_mtc;
1546 decoder->ctc_delta += mtc_delta << decoder->mtc_shift;
1548 if (decoder->tsc_ctc_mult) {
1549 timestamp = decoder->ctc_timestamp +
1550 decoder->ctc_delta * decoder->tsc_ctc_mult;
1552 timestamp = decoder->ctc_timestamp +
1553 multdiv(decoder->ctc_delta,
1554 decoder->tsc_ctc_ratio_n,
1555 decoder->tsc_ctc_ratio_d);
/* Never let an MTC move time backwards */
1558 if (timestamp < decoder->timestamp)
1559 intel_pt_log("Suppressing MTC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
1560 timestamp, decoder->timestamp);
1562 decoder->timestamp = timestamp;
1564 intel_pt_mtc_cyc_cnt_upd(decoder);
1566 decoder->timestamp_insn_cnt = 0;
1567 decoder->last_mtc = mtc;
/* Restart CYC-to-TSC calibration from this MTC if CYC preceded it */
1569 if (decoder->last_packet_type == INTEL_PT_CYC) {
1570 decoder->cyc_ref_timestamp = decoder->timestamp;
1571 decoder->cycle_cnt = 0;
1572 decoder->have_calc_cyc_to_tsc = false;
1573 intel_pt_calc_cyc_to_tsc(decoder, true);
1576 intel_pt_log_to("Setting timestamp", decoder->timestamp);
/*
 * Process a CBR packet: record the new core-to-bus ratio and recompute
 * the CBR-based CYC-to-TSC factor.  No-op when the ratio is unchanged.
 */
1579 static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
1581 unsigned int cbr = decoder->packet.payload & 0xff;
1583 decoder->cbr_payload = decoder->packet.payload;
1585 if (decoder->cbr == cbr)
1589 decoder->cbr_cyc_to_tsc = decoder->max_non_turbo_ratio_fp / cbr;
1591 intel_pt_mtc_cyc_cnt_cbr(decoder);
/*
 * Process a CYC packet: accumulate cycle counts and, when a reference
 * timestamp exists, derive a timestamp using the best available
 * CYC-to-TSC conversion (calibrated factor preferred, CBR-based fallback).
 */
1594 static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
1596 uint64_t timestamp = decoder->cyc_ref_timestamp;
1598 decoder->have_cyc = true;
1600 decoder->cycle_cnt += decoder->packet.payload;
1602 decoder->tot_cyc_cnt += decoder->packet.payload;
1603 decoder->sample_cyc = true;
/* Cannot compute a timestamp until a TSC/MTC reference has been seen */
1605 if (!decoder->cyc_ref_timestamp)
1608 if (decoder->have_calc_cyc_to_tsc)
1609 timestamp += decoder->cycle_cnt * decoder->calc_cyc_to_tsc;
1610 else if (decoder->cbr)
1611 timestamp += decoder->cycle_cnt * decoder->cbr_cyc_to_tsc;
/* Never let a CYC estimate move time backwards */
1615 if (timestamp < decoder->timestamp)
1616 intel_pt_log("Suppressing CYC timestamp " x64_fmt " less than current timestamp " x64_fmt "\n",
1617 timestamp, decoder->timestamp);
1619 decoder->timestamp = timestamp;
1621 decoder->timestamp_insn_cnt = 0;
1623 intel_pt_log_to("Setting timestamp", decoder->timestamp);
/*
 * Process a BBP (Block Begin) packet: start collecting block items of the
 * given type.  Resets the item mask at the start of a new blocks context,
 * and warns about unknown or duplicated block types.
 */
1626 static void intel_pt_bbp(struct intel_pt_decoder *decoder)
1628 if (decoder->prev_pkt_ctx == INTEL_PT_NO_CTX) {
1629 memset(decoder->state.items.mask, 0, sizeof(decoder->state.items.mask));
1630 decoder->state.items.is_32_bit = false;
1632 decoder->blk_type = decoder->packet.payload;
1633 decoder->blk_type_pos = intel_pt_blk_type_pos(decoder->blk_type);
/* For GP regs blocks, packet.count flags 32-bit register width */
1634 if (decoder->blk_type == INTEL_PT_GP_REGS)
1635 decoder->state.items.is_32_bit = decoder->packet.count;
1636 if (decoder->blk_type_pos < 0) {
1637 intel_pt_log("WARNING: Unknown block type %u\n",
1639 } else if (decoder->state.items.mask[decoder->blk_type_pos]) {
1640 intel_pt_log("WARNING: Duplicate block type %u\n",
/*
 * Process a BIP (Block Item) packet: store the payload for item @id of
 * the current block type, tracking which items are present via a bitmask.
 * Unknown or duplicate items are warned about and ignored.
 */
1645 static void intel_pt_bip(struct intel_pt_decoder *decoder)
1647 uint32_t id = decoder->packet.count;
1648 uint32_t bit = 1 << id;
1649 int pos = decoder->blk_type_pos;
1651 if (pos < 0 || id >= INTEL_PT_BLK_ITEM_ID_CNT) {
1652 intel_pt_log("WARNING: Unknown block item %u type %d\n",
1653 id, decoder->blk_type);
1657 if (decoder->state.items.mask[pos] & bit) {
1658 intel_pt_log("WARNING: Duplicate block item %u type %d\n",
1659 id, decoder->blk_type);
1662 decoder->state.items.mask[pos] |= bit;
1663 decoder->state.items.val[pos][id] = decoder->packet.payload;
1666 /* Walk PSB+ packets when already in sync. */
/*
 * Consume the packets between PSB and PSBEND while the decoder is already
 * synchronized: timing packets update state, most control-flow packets are
 * unexpected inside PSB+ and reported as errors.
 */
1667 static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
1671 decoder->in_psb = true;
1674 err = intel_pt_get_next_packet(decoder);
1678 switch (decoder->packet.type) {
1679 case INTEL_PT_PSBEND:
/* Packets below should not occur inside PSB+ */
1683 case INTEL_PT_TIP_PGD:
1684 case INTEL_PT_TIP_PGE:
1687 case INTEL_PT_TRACESTOP:
1690 case INTEL_PT_PTWRITE:
1691 case INTEL_PT_PTWRITE_IP:
1692 case INTEL_PT_EXSTOP:
1693 case INTEL_PT_EXSTOP_IP:
1694 case INTEL_PT_MWAIT:
1700 case INTEL_PT_BEP_IP:
1701 decoder->have_tma = false;
1702 intel_pt_log("ERROR: Unexpected packet\n");
1707 err = intel_pt_overflow(decoder);
1711 intel_pt_calc_tsc_timestamp(decoder);
1715 intel_pt_calc_tma(decoder);
1719 intel_pt_calc_cbr(decoder);
1722 case INTEL_PT_MODE_EXEC:
1723 decoder->exec_mode = decoder->packet.payload;
/* BIT63 - 1 masks off the non-address bit of the PIP payload */
1727 decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
1731 decoder->pge = true;
1732 if (decoder->packet.count)
1733 intel_pt_set_last_ip(decoder);
1736 case INTEL_PT_MODE_TSX:
1737 intel_pt_update_in_tx(decoder);
1741 intel_pt_calc_mtc_timestamp(decoder);
1742 if (decoder->period_type == INTEL_PT_PERIOD_MTC)
1743 decoder->state.type |= INTEL_PT_INSTRUCTION;
1755 decoder->in_psb = false;
/*
 * After a FUP (async source IP), walk forward to the TIP that supplies the
 * branch destination.  Handles TX abort flag propagation, trace begin/end
 * via TIP.PGE/TIP.PGD, and reports an error if the TIP never arrives.
 */
1760 static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
/* A FUP after an aborted transaction reports the abort, not IN_TX */
1764 if (decoder->tx_flags & INTEL_PT_ABORT_TX) {
1765 decoder->tx_flags = 0;
1766 decoder->state.flags &= ~INTEL_PT_IN_TX;
1767 decoder->state.flags |= INTEL_PT_ABORT_TX;
1769 decoder->state.flags |= INTEL_PT_ASYNC;
1773 err = intel_pt_get_next_packet(decoder);
1777 switch (decoder->packet.type) {
1780 case INTEL_PT_TRACESTOP:
1784 case INTEL_PT_MODE_TSX:
1786 case INTEL_PT_PSBEND:
1787 case INTEL_PT_PTWRITE:
1788 case INTEL_PT_PTWRITE_IP:
1789 case INTEL_PT_EXSTOP:
1790 case INTEL_PT_EXSTOP_IP:
1791 case INTEL_PT_MWAIT:
1797 case INTEL_PT_BEP_IP:
/* These packets imply the expected TIP is missing */
1798 intel_pt_log("ERROR: Missing TIP after FUP\n");
1799 decoder->pkt_state = INTEL_PT_STATE_ERR3;
1800 decoder->pkt_step = 0;
1804 intel_pt_calc_cbr(decoder);
1808 return intel_pt_overflow(decoder);
1810 case INTEL_PT_TIP_PGD:
/* Trace disabled: destination IP may be suppressed (count == 0) */
1811 decoder->state.from_ip = decoder->ip;
1812 if (decoder->packet.count == 0) {
1813 decoder->state.to_ip = 0;
1815 intel_pt_set_ip(decoder);
1816 decoder->state.to_ip = decoder->ip;
1818 decoder->pge = false;
1819 decoder->continuous_period = false;
1820 decoder->state.type |= INTEL_PT_TRACE_END;
1823 case INTEL_PT_TIP_PGE:
1824 decoder->pge = true;
1825 intel_pt_log("Omitting PGE ip " x64_fmt "\n",
1827 decoder->state.from_ip = 0;
1828 if (decoder->packet.count == 0) {
1829 decoder->state.to_ip = 0;
1831 intel_pt_set_ip(decoder);
1832 decoder->state.to_ip = decoder->ip;
1834 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
1835 intel_pt_mtc_cyc_cnt_pge(decoder);
/* Plain TIP: record the async branch from FUP ip to TIP ip */
1839 decoder->state.from_ip = decoder->ip;
1840 if (decoder->packet.count == 0) {
1841 decoder->state.to_ip = 0;
1843 intel_pt_set_ip(decoder);
1844 decoder->state.to_ip = decoder->ip;
1849 decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
1853 intel_pt_calc_mtc_timestamp(decoder);
1854 if (decoder->period_type == INTEL_PT_PERIOD_MTC)
1855 decoder->state.type |= INTEL_PT_INSTRUCTION;
1859 intel_pt_calc_cyc_timestamp(decoder);
1862 case INTEL_PT_MODE_EXEC:
1863 decoder->exec_mode = decoder->packet.payload;
1872 return intel_pt_bug(decoder);
/*
 * Main decode loop while in sync: fetch packets and dispatch on type,
 * producing decoder->state samples (branches, instructions, PTWRITE,
 * power events, block items) and keeping timing state up to date.
 * Returns when a sample is ready or on error.
 */
1877 static int intel_pt_walk_trace(struct intel_pt_decoder *decoder)
1879 bool no_tip = false;
1883 err = intel_pt_get_next_packet(decoder);
1887 switch (decoder->packet.type) {
/* TNT: taken/not-taken bits drive conditional branch walking */
1889 if (!decoder->packet.count)
1891 decoder->tnt = decoder->packet;
1892 decoder->pkt_state = INTEL_PT_STATE_TNT;
1893 err = intel_pt_walk_tnt(decoder);
1898 case INTEL_PT_TIP_PGD:
1899 if (decoder->packet.count != 0)
1900 intel_pt_set_last_ip(decoder);
1901 decoder->pkt_state = INTEL_PT_STATE_TIP_PGD;
1902 return intel_pt_walk_tip(decoder);
1904 case INTEL_PT_TIP_PGE: {
/* Trace begin: adopt the PGE IP as the new current IP */
1905 decoder->pge = true;
1906 intel_pt_mtc_cyc_cnt_pge(decoder);
1907 if (decoder->packet.count == 0) {
1908 intel_pt_log_at("Skipping zero TIP.PGE",
1912 intel_pt_set_ip(decoder);
1913 decoder->state.from_ip = 0;
1914 decoder->state.to_ip = decoder->ip;
1915 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
1920 return intel_pt_overflow(decoder);
1923 if (decoder->packet.count != 0)
1924 intel_pt_set_last_ip(decoder);
1925 decoder->pkt_state = INTEL_PT_STATE_TIP;
1926 return intel_pt_walk_tip(decoder);
/* FUP: async source IP; may or may not be followed by a TIP */
1929 if (decoder->packet.count == 0) {
1930 intel_pt_log_at("Skipping zero FUP",
1935 intel_pt_set_last_ip(decoder);
1936 if (!decoder->branch_enable) {
1937 decoder->ip = decoder->last_ip;
1938 if (intel_pt_fup_event(decoder))
1943 if (decoder->set_fup_mwait)
1945 err = intel_pt_walk_fup(decoder);
1946 if (err != -EAGAIN) {
1950 decoder->pkt_state =
1951 INTEL_PT_STATE_FUP_NO_TIP;
1953 decoder->pkt_state = INTEL_PT_STATE_FUP;
1960 return intel_pt_walk_fup_tip(decoder);
1962 case INTEL_PT_TRACESTOP:
1963 decoder->pge = false;
1964 decoder->continuous_period = false;
1965 intel_pt_clear_tx_flags(decoder);
1966 decoder->have_tma = false;
/* PSB: re-anchor last IP and walk the PSB+ packet group */
1970 decoder->last_ip = 0;
1971 decoder->have_last_ip = true;
1972 intel_pt_clear_stack(&decoder->stack);
1973 err = intel_pt_walk_psbend(decoder);
1979 * PSB+ CBR will not have changed but cater for the
1980 * possibility of another CBR change that gets caught up
1983 if (decoder->cbr != decoder->cbr_seen)
1988 decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
/* MTC may also synthesize an instruction sample for MTC periods */
1992 intel_pt_calc_mtc_timestamp(decoder);
1993 if (decoder->period_type != INTEL_PT_PERIOD_MTC)
1996 * Ensure that there has been an instruction since the
1999 if (!decoder->mtc_insn)
2001 decoder->mtc_insn = false;
2002 /* Ensure that there is a timestamp */
2003 if (!decoder->timestamp)
2005 decoder->state.type = INTEL_PT_INSTRUCTION;
2006 decoder->state.from_ip = decoder->ip;
2007 decoder->state.to_ip = 0;
2008 decoder->mtc_insn = false;
2012 intel_pt_calc_tsc_timestamp(decoder);
2016 intel_pt_calc_tma(decoder);
2020 intel_pt_calc_cyc_timestamp(decoder);
2024 intel_pt_calc_cbr(decoder);
2025 if (decoder->cbr != decoder->cbr_seen)
2029 case INTEL_PT_MODE_EXEC:
2030 decoder->exec_mode = decoder->packet.payload;
2033 case INTEL_PT_MODE_TSX:
2034 /* MODE_TSX need not be followed by FUP */
2035 if (!decoder->pge) {
2036 intel_pt_update_in_tx(decoder);
2039 err = intel_pt_mode_tsx(decoder, &no_tip);
2044 case INTEL_PT_BAD: /* Does not happen */
2045 return intel_pt_bug(decoder);
2047 case INTEL_PT_PSBEND:
/* PTWRITE.IP: payload sample deferred to the following FUP */
2053 case INTEL_PT_PTWRITE_IP:
2054 decoder->fup_ptw_payload = decoder->packet.payload;
2055 err = intel_pt_get_next_packet(decoder);
2058 if (decoder->packet.type == INTEL_PT_FUP) {
2059 decoder->set_fup_ptw = true;
2062 intel_pt_log_at("ERROR: Missing FUP after PTWRITE",
2067 case INTEL_PT_PTWRITE:
2068 decoder->state.type = INTEL_PT_PTW;
2069 decoder->state.from_ip = decoder->ip;
2070 decoder->state.to_ip = 0;
2071 decoder->state.ptw_payload = decoder->packet.payload;
2074 case INTEL_PT_MWAIT:
2075 decoder->fup_mwait_payload = decoder->packet.payload;
2076 decoder->set_fup_mwait = true;
/* PWRE right after MWAIT is reported with the MWAIT's FUP */
2080 if (decoder->set_fup_mwait) {
2081 decoder->fup_pwre_payload =
2082 decoder->packet.payload;
2083 decoder->set_fup_pwre = true;
2086 decoder->state.type = INTEL_PT_PWR_ENTRY;
2087 decoder->state.from_ip = decoder->ip;
2088 decoder->state.to_ip = 0;
2089 decoder->state.pwrx_payload = decoder->packet.payload;
2092 case INTEL_PT_EXSTOP_IP:
2093 err = intel_pt_get_next_packet(decoder);
2096 if (decoder->packet.type == INTEL_PT_FUP) {
2097 decoder->set_fup_exstop = true;
2100 intel_pt_log_at("ERROR: Missing FUP after EXSTOP",
2105 case INTEL_PT_EXSTOP:
2106 decoder->state.type = INTEL_PT_EX_STOP;
2107 decoder->state.from_ip = decoder->ip;
2108 decoder->state.to_ip = 0;
2112 decoder->state.type = INTEL_PT_PWR_EXIT;
2113 decoder->state.from_ip = decoder->ip;
2114 decoder->state.to_ip = 0;
2115 decoder->state.pwrx_payload = decoder->packet.payload;
2119 intel_pt_bbp(decoder);
2123 intel_pt_bip(decoder);
/* Block end: emit the collected block items as a sample */
2127 decoder->state.type = INTEL_PT_BLK_ITEMS;
2128 decoder->state.from_ip = decoder->ip;
2129 decoder->state.to_ip = 0;
2132 case INTEL_PT_BEP_IP:
2133 err = intel_pt_get_next_packet(decoder);
2136 if (decoder->packet.type == INTEL_PT_FUP) {
2137 decoder->set_fup_bep = true;
2140 intel_pt_log_at("ERROR: Missing FUP after BEP",
2146 return intel_pt_bug(decoder);
/*
 * True if the current IP packet yields a usable IP: either the last IP is
 * known (for compressed forms), or the packet carries a full IP
 * (count 3 or 6 correspond to uncompressed IP payload forms).
 */
2151 static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
2153 return decoder->packet.count &&
2154 (decoder->have_last_ip || decoder->packet.count == 3 ||
2155 decoder->packet.count == 6);
2158 /* Walk PSB+ packets to get in sync. */
/*
 * Walk a PSB+ packet group while synchronizing: collect timing state and
 * try to establish the current IP from a FUP carried inside PSB+.
 * Unexpected packets produce an error state (ERR4 vs ERR3 depending on
 * whether an IP has been established).
 */
2159 static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
2163 decoder->in_psb = true;
2166 err = intel_pt_get_next_packet(decoder);
2170 switch (decoder->packet.type) {
2171 case INTEL_PT_TIP_PGD:
2172 decoder->continuous_period = false;
2174 case INTEL_PT_TIP_PGE:
2176 case INTEL_PT_PTWRITE:
2177 case INTEL_PT_PTWRITE_IP:
2178 case INTEL_PT_EXSTOP:
2179 case INTEL_PT_EXSTOP_IP:
2180 case INTEL_PT_MWAIT:
2186 case INTEL_PT_BEP_IP:
2187 intel_pt_log("ERROR: Unexpected packet\n");
/* FUP inside PSB+ provides the current IP */
2192 decoder->pge = true;
2193 if (intel_pt_have_ip(decoder)) {
2194 uint64_t current_ip = decoder->ip;
2196 intel_pt_set_ip(decoder);
2198 intel_pt_log_to("Setting IP",
2204 intel_pt_calc_mtc_timestamp(decoder);
2208 intel_pt_calc_tsc_timestamp(decoder);
2212 intel_pt_calc_tma(decoder);
2216 intel_pt_calc_cyc_timestamp(decoder);
2220 intel_pt_calc_cbr(decoder);
2224 decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
2227 case INTEL_PT_MODE_EXEC:
2228 decoder->exec_mode = decoder->packet.payload;
2231 case INTEL_PT_MODE_TSX:
2232 intel_pt_update_in_tx(decoder);
2235 case INTEL_PT_TRACESTOP:
2236 decoder->pge = false;
2237 decoder->continuous_period = false;
2238 intel_pt_clear_tx_flags(decoder);
2242 decoder->have_tma = false;
2243 intel_pt_log("ERROR: Unexpected packet\n");
2245 decoder->pkt_state = INTEL_PT_STATE_ERR4;
2247 decoder->pkt_state = INTEL_PT_STATE_ERR3;
2251 case INTEL_PT_BAD: /* Does not happen */
2252 err = intel_pt_bug(decoder);
2256 err = intel_pt_overflow(decoder);
2259 case INTEL_PT_PSBEND:
2272 decoder->in_psb = false;
/*
 * Scan packets until a full IP can be established (used when resyncing
 * after an error or overflow).  IP-bearing packets set the IP when
 * possible; timing packets keep the clock state current; PSB restarts
 * the PSB+ walk.
 */
2277 static int intel_pt_walk_to_ip(struct intel_pt_decoder *decoder)
2282 err = intel_pt_get_next_packet(decoder);
2286 switch (decoder->packet.type) {
2287 case INTEL_PT_TIP_PGD:
2288 decoder->continuous_period = false;
2289 decoder->pge = false;
2290 if (intel_pt_have_ip(decoder))
2291 intel_pt_set_ip(decoder);
2294 decoder->state.type |= INTEL_PT_TRACE_END;
2297 case INTEL_PT_TIP_PGE:
2298 decoder->pge = true;
2299 intel_pt_mtc_cyc_cnt_pge(decoder);
2300 if (intel_pt_have_ip(decoder))
2301 intel_pt_set_ip(decoder);
2304 decoder->state.type |= INTEL_PT_TRACE_BEGIN;
2308 decoder->pge = true;
2309 if (intel_pt_have_ip(decoder))
2310 intel_pt_set_ip(decoder);
2316 if (intel_pt_have_ip(decoder))
2317 intel_pt_set_ip(decoder);
2323 intel_pt_calc_mtc_timestamp(decoder);
2327 intel_pt_calc_tsc_timestamp(decoder);
2331 intel_pt_calc_tma(decoder);
2335 intel_pt_calc_cyc_timestamp(decoder);
2339 intel_pt_calc_cbr(decoder);
2343 decoder->cr3 = decoder->packet.payload & (BIT63 - 1);
2346 case INTEL_PT_MODE_EXEC:
2347 decoder->exec_mode = decoder->packet.payload;
2350 case INTEL_PT_MODE_TSX:
2351 intel_pt_update_in_tx(decoder);
2355 return intel_pt_overflow(decoder);
2357 case INTEL_PT_BAD: /* Does not happen */
2358 return intel_pt_bug(decoder);
2360 case INTEL_PT_TRACESTOP:
2361 decoder->pge = false;
2362 decoder->continuous_period = false;
2363 intel_pt_clear_tx_flags(decoder);
2364 decoder->have_tma = false;
/* PSB: restart synchronization from the PSB+ group */
2368 decoder->last_ip = 0;
2369 decoder->have_last_ip = true;
2370 intel_pt_clear_stack(&decoder->stack);
2371 err = intel_pt_walk_psb(decoder);
2375 /* Do not have a sample */
2376 decoder->state.type = 0;
2382 case INTEL_PT_PSBEND:
2386 case INTEL_PT_PTWRITE:
2387 case INTEL_PT_PTWRITE_IP:
2388 case INTEL_PT_EXSTOP:
2389 case INTEL_PT_EXSTOP_IP:
2390 case INTEL_PT_MWAIT:
2396 case INTEL_PT_BEP_IP:
/*
 * Re-establish the current IP after sync loss: clear all deferred-FUP
 * flags, then either (with branch tracing off) just return to in-sync
 * state with no sample, or scan packets until a full IP is found.
 */
2403 static int intel_pt_sync_ip(struct intel_pt_decoder *decoder)
/* Discard any pending FUP-deferred events - they belong to lost trace */
2407 decoder->set_fup_tx_flags = false;
2408 decoder->set_fup_ptw = false;
2409 decoder->set_fup_mwait = false;
2410 decoder->set_fup_pwre = false;
2411 decoder->set_fup_exstop = false;
2412 decoder->set_fup_bep = false;
2414 if (!decoder->branch_enable) {
2415 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2416 decoder->overflow = false;
2417 decoder->state.type = 0; /* Do not have a sample */
2421 intel_pt_log("Scanning for full IP\n");
2422 err = intel_pt_walk_to_ip(decoder);
2426 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2427 decoder->overflow = false;
2429 decoder->state.from_ip = 0;
2430 decoder->state.to_ip = decoder->ip;
2431 intel_pt_log_to("Setting IP", decoder->ip);
/*
 * Check whether the current buffer ends with the beginning of a PSB
 * packet.  Returns the number of PSB bytes present at the end of the
 * buffer (0 if none), so a PSB split across buffers can be stitched.
 */
2436 static int intel_pt_part_psb(struct intel_pt_decoder *decoder)
2438 const unsigned char *end = decoder->buf + decoder->len;
/* Try the longest possible partial match first */
2441 for (i = INTEL_PT_PSB_LEN - 1; i; i--) {
2442 if (i > decoder->len)
2444 if (!memcmp(end - i, INTEL_PT_PSB_STR, i))
/*
 * Check that the start of the current buffer contains the remaining
 * INTEL_PT_PSB_LEN - @part_psb bytes of a PSB split across buffers.
 */
2450 static int intel_pt_rest_psb(struct intel_pt_decoder *decoder, int part_psb)
2452 size_t rest_psb = INTEL_PT_PSB_LEN - part_psb;
2453 const char *psb = INTEL_PT_PSB_STR;
2455 if (rest_psb > decoder->len ||
2456 memcmp(decoder->buf, psb + part_psb, rest_psb))
/*
 * Handle a PSB split across two buffers: fetch the next buffer, verify
 * the PSB's remainder is at its start, then present a whole PSB from
 * temp_buf and queue the rest of the new buffer as next_buf/next_len.
 */
2462 static int intel_pt_get_split_psb(struct intel_pt_decoder *decoder,
2467 decoder->pos += decoder->len;
2470 ret = intel_pt_get_next_data(decoder, false);
2474 rest_psb = intel_pt_rest_psb(decoder, part_psb);
/* Rewind pos so the stitched PSB is positioned at its true start */
2478 decoder->pos -= part_psb;
2479 decoder->next_buf = decoder->buf + rest_psb;
2480 decoder->next_len = decoder->len - rest_psb;
2481 memcpy(decoder->temp_buf, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
2482 decoder->buf = decoder->temp_buf;
2483 decoder->len = INTEL_PT_PSB_LEN;
/*
 * Scan the trace buffers for the next PSB packet, including the case of a
 * PSB split across buffer boundaries.  On success, positions the decoder
 * at the PSB and fetches it as the next packet.
 */
2488 static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder)
2490 unsigned char *next;
2493 intel_pt_log("Scanning for PSB\n");
2495 if (!decoder->len) {
2496 ret = intel_pt_get_next_data(decoder, false);
2501 next = memmem(decoder->buf, decoder->len, INTEL_PT_PSB_STR,
/* No full PSB here - check for one straddling the buffer end */
2506 part_psb = intel_pt_part_psb(decoder);
2508 ret = intel_pt_get_split_psb(decoder, part_psb);
2512 decoder->pos += decoder->len;
/* Skip directly to the PSB found by memmem() */
2518 decoder->pkt_step = next - decoder->buf;
2519 return intel_pt_get_next_packet(decoder);
/*
 * Full synchronization from scratch: reset branch/IP state, find a PSB,
 * walk the PSB+ group, then either report in-sync (no sample) or go on
 * to establish the IP.
 */
2523 static int intel_pt_sync(struct intel_pt_decoder *decoder)
2527 decoder->pge = false;
2528 decoder->continuous_period = false;
2529 decoder->have_last_ip = false;
2530 decoder->last_ip = 0;
2532 intel_pt_clear_stack(&decoder->stack);
2534 err = intel_pt_scan_for_psb(decoder);
2538 decoder->have_last_ip = true;
2539 decoder->pkt_state = INTEL_PT_STATE_NO_IP;
2541 err = intel_pt_walk_psb(decoder);
2546 decoder->state.type = 0; /* Do not have a sample */
2547 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2549 return intel_pt_sync_ip(decoder);
/*
 * Estimate the current timestamp from the instruction count since the
 * last sample timestamp, scaled by the core frequency implied by
 * CBR / max_non_turbo_ratio.  The << 1 assumes roughly 2 cycles per
 * instruction - a rough heuristic, not an exact conversion.
 */
2555 static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
2557 uint64_t est = decoder->sample_insn_cnt << 1;
2559 if (!decoder->cbr || !decoder->max_non_turbo_ratio)
2562 est *= decoder->max_non_turbo_ratio;
2563 est /= decoder->cbr;
2565 return decoder->sample_timestamp;
2565 return decoder->sample_timestamp + est;
/*
 * Public entry point: decode until the next sample (or error) and return
 * the decoder state describing it.  Dispatches on pkt_state, retries on
 * -ENOLINK (non-consecutive trace), attaches CBR changes to the sample,
 * and fills in timestamps and cumulative counts.
 */
2568 const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
2573 decoder->state.type = INTEL_PT_BRANCH;
2574 decoder->state.flags = 0;
2576 switch (decoder->pkt_state) {
2577 case INTEL_PT_STATE_NO_PSB:
2578 err = intel_pt_sync(decoder);
2580 case INTEL_PT_STATE_NO_IP:
2581 decoder->have_last_ip = false;
2582 decoder->last_ip = 0;
/* Fall through to resync-by-IP */
2585 case INTEL_PT_STATE_ERR_RESYNC:
2586 err = intel_pt_sync_ip(decoder);
2588 case INTEL_PT_STATE_IN_SYNC:
2589 err = intel_pt_walk_trace(decoder);
2591 case INTEL_PT_STATE_TNT:
2592 case INTEL_PT_STATE_TNT_CONT:
2593 err = intel_pt_walk_tnt(decoder);
2595 err = intel_pt_walk_trace(decoder);
2597 case INTEL_PT_STATE_TIP:
2598 case INTEL_PT_STATE_TIP_PGD:
2599 err = intel_pt_walk_tip(decoder);
2601 case INTEL_PT_STATE_FUP:
2602 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2603 err = intel_pt_walk_fup(decoder);
2605 err = intel_pt_walk_fup_tip(decoder);
2607 decoder->pkt_state = INTEL_PT_STATE_FUP;
2609 case INTEL_PT_STATE_FUP_NO_TIP:
2610 decoder->pkt_state = INTEL_PT_STATE_IN_SYNC;
2611 err = intel_pt_walk_fup(decoder);
2613 err = intel_pt_walk_trace(decoder);
2616 err = intel_pt_bug(decoder);
2619 } while (err == -ENOLINK);
/* Error path: report the error with current IP and time */
2622 decoder->state.err = intel_pt_ext_err(err);
2623 decoder->state.from_ip = decoder->ip;
2624 intel_pt_update_sample_time(decoder);
2625 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
2627 decoder->state.err = 0;
/* Attach a CBR change indication to the next available sample */
2628 if (decoder->cbr != decoder->cbr_seen) {
2629 decoder->cbr_seen = decoder->cbr;
2630 if (!decoder->state.type) {
2631 decoder->state.from_ip = decoder->ip;
2632 decoder->state.to_ip = 0;
2634 decoder->state.type |= INTEL_PT_CBR_CHG;
2635 decoder->state.cbr_payload = decoder->cbr_payload;
2636 decoder->state.cbr = decoder->cbr;
/* Only certain pkt states sample time - see intel_pt_sample_time() */
2638 if (intel_pt_sample_time(decoder->pkt_state)) {
2639 intel_pt_update_sample_time(decoder);
2640 if (decoder->sample_cyc)
2641 decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
2645 decoder->state.timestamp = decoder->sample_timestamp;
2646 decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
2647 decoder->state.cr3 = decoder->cr3;
2648 decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
2649 decoder->state.tot_cyc_cnt = decoder->sample_tot_cyc_cnt;
2651 return &decoder->state;
2655 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
2656 * @buf: pointer to buffer pointer
2657 * @len: size of buffer
2659 * Updates the buffer pointer to point to the start of the next PSB packet if
2660 * there is one, otherwise the buffer pointer is unchanged. If @buf is updated,
2661 * @len is adjusted accordingly.
2663 * Return: %true if a PSB packet is found, %false otherwise.
2665 static bool intel_pt_next_psb(unsigned char **buf, size_t *len)
2667 unsigned char *next;
/* memmem() locates the PSB byte pattern anywhere in the buffer */
2669 next = memmem(*buf, *len, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
2671 *len -= next - *buf;
2679 * intel_pt_step_psb - move buffer pointer to the start of the following PSB
2681 * @buf: pointer to buffer pointer
2682 * @len: size of buffer
2684 * Updates the buffer pointer to point to the start of the following PSB packet
2685 * (skipping the PSB at @buf itself) if there is one, otherwise the buffer
2686 * pointer is unchanged. If @buf is updated, @len is adjusted accordingly.
2688 * Return: %true if a PSB packet is found, %false otherwise.
2690 static bool intel_pt_step_psb(unsigned char **buf, size_t *len)
2692 unsigned char *next;
/* Start at *buf + 1 so the PSB currently at *buf is skipped */
2697 next = memmem(*buf + 1, *len - 1, INTEL_PT_PSB_STR, INTEL_PT_PSB_LEN);
2699 *len -= next - *buf;
2707 * intel_pt_last_psb - find the last PSB packet in a buffer.
2709 * @len: size of buffer
2711 * This function finds the last PSB in a buffer.
2713 * Return: A pointer to the last PSB in @buf if found, %NULL otherwise.
2715 static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
2717 const char *n = INTEL_PT_PSB_STR;
2721 if (len < INTEL_PT_PSB_LEN)
2724 k = len - INTEL_PT_PSB_LEN + 1;
/* Search backwards for the first PSB byte, then verify the rest */
2726 p = memrchr(buf, n[0], k);
2729 if (!memcmp(p + 1, n + 1, INTEL_PT_PSB_LEN - 1))
2738 * intel_pt_next_tsc - find and return next TSC.
2740 * @len: size of buffer
2741 * @tsc: TSC value returned
2742 * @rem: returns remaining size when TSC is found
2744 * Find a TSC packet in @buf and return the TSC value. This function assumes
2745 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
2746 * PSBEND packet is found.
2748 * Return: %true if TSC is found, false otherwise.
2750 static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
2753 enum intel_pt_pkt_ctx ctx = INTEL_PT_NO_CTX;
2754 struct intel_pt_pkt packet;
2758 ret = intel_pt_get_packet(buf, len, &packet, &ctx);
2761 if (packet.type == INTEL_PT_TSC) {
2762 *tsc = packet.payload;
2766 if (packet.type == INTEL_PT_PSBEND)
2775 * intel_pt_tsc_cmp - compare 7-byte TSCs.
2776 * @tsc1: first TSC to compare
2777 * @tsc2: second TSC to compare
2779 * This function compares 7-byte TSC values allowing for the possibility that
2780 * TSC wrapped around. Generally it is not possible to know if TSC has wrapped
2781 * around so for that purpose this function assumes the absolute difference is
2782 * less than half the maximum difference.
2784 * Return: %-1 if @tsc1 is before @tsc2, %0 if @tsc1 == @tsc2, %1 if @tsc1 is
2787 static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
/* Half the 56-bit TSC range - differences beyond this imply wraparound */
2789 const uint64_t halfway = (1ULL << 55);
2795 if (tsc2 - tsc1 < halfway)
2800 if (tsc1 - tsc2 < halfway)
2807 #define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
2810 * adj_for_padding - adjust overlap to account for padding.
2811 * @buf_b: second buffer
2812 * @buf_a: first buffer
2813 * @len_a: size of first buffer
2815 * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
2818 * Return: A pointer into @buf_b from where non-overlapped data starts
2820 static unsigned char *adj_for_padding(unsigned char *buf_b,
2821 unsigned char *buf_a, size_t len_a)
2823 unsigned char *p = buf_b - MAX_PADDING;
2824 unsigned char *q = buf_a + len_a - MAX_PADDING;
/* Compare the trailing bytes of buf_a against buf_b to skip padding */
2827 for (i = MAX_PADDING; i; i--, p++, q++) {
2836 * intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
2838 * @buf_a: first buffer
2839 * @len_a: size of first buffer
2840 * @buf_b: second buffer
2841 * @len_b: size of second buffer
2842 * @consecutive: returns true if there is data in buf_b that is consecutive
2845 * If the trace contains TSC we can look at the last TSC of @buf_a and the
2846 * first TSC of @buf_b in order to determine if the buffers overlap, and then
2847 * walk forward in @buf_b until a later TSC is found. A precondition is that
2848 * @buf_a and @buf_b are positioned at a PSB.
2850 * Return: A pointer into @buf_b from where non-overlapped data starts, or
2851 * @buf_b + @len_b if there is no non-overlapped data.
2853 static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2855 unsigned char *buf_b,
2856 size_t len_b, bool *consecutive)
2858 uint64_t tsc_a, tsc_b;
2860 size_t len, rem_a, rem_b;
/* Find the reference TSC: the one in buf_a's last complete PSB+ */
2862 p = intel_pt_last_psb(buf_a, len_a);
2864 return buf_b; /* No PSB in buf_a => no overlap */
2866 len = len_a - (p - buf_a);
2867 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
2868 /* The last PSB+ in buf_a is incomplete, so go back one more */
2870 p = intel_pt_last_psb(buf_a, len_a);
2872 return buf_b; /* No full PSB+ => assume no overlap */
2873 len = len_a - (p - buf_a);
2874 if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
2875 return buf_b; /* No TSC in buf_a => assume no overlap */
2879 /* Ignore PSB+ with no TSC */
2880 if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
2881 int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
2883 /* Same TSC, so buffers are consecutive */
2884 if (!cmp && rem_b >= rem_a) {
2885 unsigned char *start;
2887 *consecutive = true;
2888 start = buf_b + len_b - (rem_b - rem_a);
2889 return adj_for_padding(start, buf_a, len_a);
2892 return buf_b; /* tsc_a < tsc_b => no overlap */
/* Still overlapping: advance buf_b to its next PSB and retry */
2895 if (!intel_pt_step_psb(&buf_b, &len_b))
2896 return buf_b + len_b; /* No PSB in buf_b => no data */
2901 * intel_pt_find_overlap - determine start of non-overlapped trace data.
2902 * @buf_a: first buffer
2903 * @len_a: size of first buffer
2904 * @buf_b: second buffer
2905 * @len_b: size of second buffer
2906 * @have_tsc: can use TSC packets to detect overlap
2907 * @consecutive: returns true if there is data in buf_b that is consecutive
2910 * When trace samples or snapshots are recorded there is the possibility that
2911 * the data overlaps. Note that, for the purposes of decoding, data is only
2912 * useful if it begins with a PSB packet.
2914 * Return: A pointer into @buf_b from where non-overlapped data starts, or
2915 * @buf_b + @len_b if there is no non-overlapped data.
2917 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2918 unsigned char *buf_b, size_t len_b,
2919 bool have_tsc, bool *consecutive)
2921 unsigned char *found;
2923 /* Buffer 'b' must start at PSB so throw away everything before that */
2924 if (!intel_pt_next_psb(&buf_b, &len_b))
2925 return buf_b + len_b; /* No PSB */
2927 if (!intel_pt_next_psb(&buf_a, &len_a))
2928 return buf_b; /* No overlap */
/* Prefer the TSC-based comparison when timestamps are available */
2931 found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
2938 * Buffer 'b' cannot end within buffer 'a' so, for comparison purposes,
2939 * we can ignore the first part of buffer 'a'.
2941 while (len_b < len_a) {
2942 if (!intel_pt_step_psb(&buf_a, &len_a))
2943 return buf_b; /* No overlap */
2946 /* Now len_b >= len_a */
2948 /* Potential overlap so check the bytes */
2949 found = memmem(buf_a, len_a, buf_b, len_a);
2951 *consecutive = true;
2952 return adj_for_padding(buf_b + len_a, buf_a, len_a);
2955 /* Try again at next PSB in buffer 'a' */
2956 if (!intel_pt_step_psb(&buf_a, &len_a))
2957 return buf_b; /* No overlap */
2962 * struct fast_forward_data - data used by intel_pt_ff_cb().
2963 * @timestamp: timestamp to fast forward towards
2964 * @buf_timestamp: buffer timestamp of last buffer with trace data earlier than
2965 * the fast forward timestamp.
2967 struct fast_forward_data {
/* NOTE(review): the @timestamp member line appears missing from this extract */
2969 uint64_t buf_timestamp;
2973 * intel_pt_ff_cb - fast forward lookahead callback.
2974 * @buffer: Intel PT trace buffer
2975 * @data: opaque pointer to fast forward data (struct fast_forward_data)
2977 * Determine if @buffer trace is past the fast forward timestamp.
2979 * Return: 1 (stop lookahead) if @buffer trace is past the fast forward
2980 * timestamp, and 0 otherwise.
2982 static int intel_pt_ff_cb(struct intel_pt_buffer *buffer, void *data)
2984 struct fast_forward_data *d = data;
2990 buf = (unsigned char *)buffer->buf;
2993 if (!intel_pt_next_psb(&buf, &len) ||
2994 !intel_pt_next_tsc(buf, len, &tsc, &rem))
2997 tsc = intel_pt_8b_tsc(tsc, buffer->ref_timestamp);
2999 intel_pt_log("Buffer 1st timestamp " x64_fmt " ref timestamp " x64_fmt "\n",
3000 tsc, buffer->ref_timestamp);
3003 * If the buffer contains a timestamp earlier that the fast forward
3004 * timestamp, then record it, else stop.
3006 if (tsc < d->timestamp)
3007 d->buf_timestamp = buffer->ref_timestamp;
3015 * intel_pt_fast_forward - reposition decoder forwards.
3016 * @decoder: Intel PT decoder
3017 * @timestamp: timestamp to fast forward towards
3019 * Reposition decoder at the last PSB with a timestamp earlier than @timestamp.
3021 * Return: 0 on success or negative error code on failure.
3023 int intel_pt_fast_forward(struct intel_pt_decoder *decoder, uint64_t timestamp)
3025 struct fast_forward_data d = { .timestamp = timestamp };
3030 intel_pt_log("Fast forward towards timestamp " x64_fmt "\n", timestamp);
3032 /* Find buffer timestamp of buffer to fast forward to */
3033 err = decoder->lookahead(decoder->data, intel_pt_ff_cb, &d);
3037 /* Walk to buffer with same buffer timestamp */
3038 if (d.buf_timestamp) {
3040 decoder->pos += decoder->len;
3042 err = intel_pt_get_next_data(decoder, true);
3043 /* -ENOLINK means non-consecutive trace */
3044 if (err && err != -ENOLINK)
3046 } while (decoder->buf_timestamp != d.buf_timestamp);
3052 buf = (unsigned char *)decoder->buf;
3055 if (!intel_pt_next_psb(&buf, &len))
3059 * Walk PSBs while the PSB timestamp is less than the fast forward
3066 if (!intel_pt_next_tsc(buf, len, &tsc, &rem))
3068 tsc = intel_pt_8b_tsc(tsc, decoder->buf_timestamp);
3070 * A TSC packet can slip past MTC packets but, after fast
3071 * forward, decoding starts at the TSC timestamp. That means
3072 * the timestamps may not be exactly the same as the timestamps
3073 * that would have been decoded without fast forward.
3075 if (tsc < timestamp) {
3076 intel_pt_log("Fast forward to next PSB timestamp " x64_fmt "\n", tsc);
3077 decoder->pos += decoder->len - len;
3080 intel_pt_reposition(decoder);
3084 } while (intel_pt_step_psb(&buf, &len));