1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (c) 2013 Google, Inc
10 #include <asm/unaligned.h>
11 #include <u-boot/crc.h>
13 /* TPM NVRAM location indices for the spaces this emulator knows about. */
14 #define FIRMWARE_NV_INDEX 0x1007
15 #define KERNEL_NV_INDEX 0x1008
16 #define BACKUP_NV_INDEX 0x1009
17 #define FWMP_NV_INDEX 0x100a
18 #define REC_HASH_NV_INDEX 0x100b
/* The recovery-hash space holds one SHA-256 digest */
19 #define REC_HASH_NV_SIZE VB2_SHA256_DIGEST_SIZE
/*
 * Byte offset of the permissions field within a serialised
 * TPM_NV_DATA_PUBLIC structure (NOTE(review): assumed from the TPM 1.2
 * structure layout - confirm against the spec).
 */
21 #define NV_DATA_PUBLIC_PERMISSIONS_OFFSET 60
23 /* Kernel TPM space - KERNEL_NV_INDEX, locked with physical presence */
24 #define ROLLBACK_SPACE_KERNEL_VERSION 2
25 #define ROLLBACK_SPACE_KERNEL_UID 0x4752574C /* 'GRWL' */
/*
 * Layout of the kernel rollback NV space, synthesised for reads of
 * KERNEL_NV_INDEX. Note that the trailing declarator also defines a
 * file-scope variable with the same name as the struct tag.
 */
27 struct rollback_space_kernel {
28 /* Struct version, for backwards compatibility */
29 uint8_t struct_version;
30 /* Unique ID to detect space redefinition */
/* Kernel version value(s); exact packing not visible here - confirm */
33 uint32_t kernel_versions;
34 /* Reserved for future expansion */
36 /* Checksum (v2 and later only) */
38 } __packed rollback_space_kernel;
41 * These numbers derive from adding the sizes of command fields as shown in
42 * the TPM commands manual.
44 #define TPM_REQUEST_HEADER_LENGTH 10
45 #define TPM_RESPONSE_HEADER_LENGTH 10
47 /* These are the different non-volatile spaces that we emulate */
59 /* Size of each non-volatile space */
60 #define NV_DATA_SIZE 0x20
/* Raw contents of one emulated non-volatile space */
64 u8 data[NV_DATA_SIZE];
68 * Information about our TPM emulation. This is preserved in the sandbox
69 * state file if enabled.
71 static struct tpm_state {
/* One entry per emulated space, indexed by the NV_SEQ_* sequence number */
73 struct nvdata_state nvdata[NV_SEQ_COUNT];
77 * sandbox_tpm_read_state() - read the sandbox TPM state from the state file
79 * If data is available, then blob and node will provide access to it. If
80 * not this function sets up an empty TPM.
82 * @blob: Pointer to device tree blob, or NULL if no data to read
83 * @node: Node offset to read from
85 static int sandbox_tpm_read_state(const void *blob, int node)
/* Restore each emulated NV space from its own "nvdataN" property */
94 for (i = 0; i < NV_SEQ_COUNT; i++) {
97 sprintf(prop_name, "nvdata%d", i);
98 prop = fdt_getprop(blob, node, prop_name, &len);
/* Only accept a property of exactly the expected size */
99 if (prop && len == NV_DATA_SIZE) {
100 memcpy(g_state.nvdata[i].data, prop, NV_DATA_SIZE);
101 g_state.nvdata[i].present = true;
104 g_state.valid = true;
110 * sandbox_tpm_write_state() - Write out our state to the state file
112 * The caller will ensure that there is a node ready for the state. The node
113 * may already contain the old state, in which case it is overridden.
115 * @blob: Device tree blob holding state
116 * @node: Node to write our state into
118 static int sandbox_tpm_write_state(void *blob, int node)
123 * We are guaranteed enough space to write basic properties.
124 * We could use fdt_add_subnode() to put each set of data in its
125 * own node - perhaps useful if we add access information to each.
/* Write one "nvdataN" property per space that has been written */
127 for (i = 0; i < NV_SEQ_COUNT; i++) {
130 if (g_state.nvdata[i].present) {
131 sprintf(prop_name, "nvdata%d", i);
132 fdt_setprop(blob, node, prop_name,
133 g_state.nvdata[i].data, NV_DATA_SIZE);
/* Hook the read/write handlers into the sandbox state framework */
140 SANDBOX_STATE_IO(sandbox_tpm, "google,sandbox-tpm", sandbox_tpm_read_state,
141 sandbox_tpm_write_state);
/*
 * index_to_seq() - map a TPM NV index to the internal NV_SEQ_* number
 *
 * @index: TPM NV index (e.g. KERNEL_NV_INDEX)
 * Return: internal sequence number used to index nvdata[], or an error
 * for an unrecognised index (TODO confirm: error-return line not shown)
 */
143 static int index_to_seq(uint32_t index)
146 case FIRMWARE_NV_INDEX:
147 return NV_SEQ_FIRMWARE;
148 case KERNEL_NV_INDEX:
149 return NV_SEQ_KERNEL;
150 case BACKUP_NV_INDEX:
151 return NV_SEQ_BACKUP;
154 case REC_HASH_NV_INDEX:
155 return NV_SEQ_REC_HASH;
157 return NV_GLOBAL_LOCK;
160 printf("Invalid nv index %#x\n", index);
/*
 * handle_cap_flag_space() - emit a TPM_NV_DATA_PUBLIC record for one space
 *
 * Builds a big-endian TPM_NV_DATA_PUBLIC structure describing @index with
 * the permission attribute set to 1 (TPM_NV_PER_PPWRITE), copies it to
 * *@datap and advances the pointer just past the record.
 *
 * @datap: pointer to the output position; updated past the written record
 * @index: TPM NV index being described
 */
164 static void handle_cap_flag_space(u8 **datap, uint index)
166 struct tpm_nv_data_public pub;
168 /* TPM_NV_PER_PPWRITE */
169 memset(&pub, '\0', sizeof(pub));
170 pub.nv_index = __cpu_to_be32(index);
171 pub.pcr_info_read.pcr_selection.size_of_select = __cpu_to_be16(
172 sizeof(pub.pcr_info_read.pcr_selection.pcr_select));
173 pub.permission.attributes = __cpu_to_be32(1);
/* Write policy mirrors the read policy */
174 pub.pcr_info_write = pub.pcr_info_read;
175 memcpy(*datap, &pub, sizeof(pub));
176 *datap += sizeof(pub);
/*
 * sandbox_tpm_xfer() - emulate one TPM 1.2 command/response exchange
 *
 * Decodes the command ordinal from @sendbuf and emulates the subset of
 * TPM 1.2 commands the sandbox needs: capability queries, NV read/write,
 * and several commands that are simply acknowledged with a zeroed success
 * response. The response is written to @recvbuf and the caller's length
 * (*recv_len, declared in the elided part of the signature) is updated.
 *
 * @dev: TPM device
 * @sendbuf: marshalled command from the caller
 * @send_size: number of bytes in @sendbuf
 * @recvbuf: buffer to receive the marshalled response
 */
179 static int sandbox_tpm_xfer(struct udevice *dev, const uint8_t *sendbuf,
180 size_t send_size, uint8_t *recvbuf,
183 struct tpm_state *tpm = dev_get_priv(dev);
184 uint32_t code, index, length, type;
/* Command ordinal sits after the 16-bit tag and 32-bit length fields */
188 code = get_unaligned_be32(sendbuf + sizeof(uint16_t) +
191 printf("tpm: %zd bytes, recv_len %zd, cmd = %x\n", send_size,
193 print_buffer(0, sendbuf, 1, send_size, 0);
196 case TPM_CMD_GET_CAPABILITY:
197 type = get_unaligned_be32(sendbuf + 14);
200 index = get_unaligned_be32(sendbuf + 18);
201 printf("Get flags index %#02x\n", index);
203 memset(recvbuf, '\0', *recv_len);
204 data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
/* Flags for an NV space: return its public data record */
207 case FIRMWARE_NV_INDEX:
209 case KERNEL_NV_INDEX:
210 handle_cap_flag_space(&data, index);
211 *recv_len = data - recvbuf -
212 TPM_RESPONSE_HEADER_LENGTH -
215 case TPM_CAP_FLAG_PERMANENT: {
216 struct tpm_permanent_flags *pflags;
/* Return an all-zero permanent-flags block with just the tag set */
218 pflags = (struct tpm_permanent_flags *)data;
219 memset(pflags, '\0', sizeof(*pflags));
220 put_unaligned_be32(TPM_TAG_PERMANENT_FLAGS,
222 *recv_len = TPM_HEADER_SIZE + 4 +
227 printf(" ** Unknown flags index %x\n", index);
230 put_unaligned_be32(*recv_len,
232 TPM_RESPONSE_HEADER_LENGTH);
234 case TPM_CAP_NV_INDEX:
235 index = get_unaligned_be32(sendbuf + 18);
236 printf("Get cap nv index %#02x\n", index);
237 put_unaligned_be32(22, recvbuf +
238 TPM_RESPONSE_HEADER_LENGTH);
241 printf(" ** Unknown 0x65 command type %#02x\n",
/* NV write: copy the payload into the selected emulated space */
246 case TPM_CMD_NV_WRITE_VALUE:
247 index = get_unaligned_be32(sendbuf + 10);
248 length = get_unaligned_be32(sendbuf + 18);
249 seq = index_to_seq(index);
252 printf("tpm: nvwrite index=%#02x, len=%#02x\n", index, length);
253 memcpy(&tpm->nvdata[seq].data, sendbuf + 22, length);
254 tpm->nvdata[seq].present = true;
256 memset(recvbuf, '\0', *recv_len);
258 case TPM_CMD_NV_READ_VALUE: /* nvread */
259 index = get_unaligned_be32(sendbuf + 10);
260 length = get_unaligned_be32(sendbuf + 18);
261 seq = index_to_seq(index);
264 printf("tpm: nvread index=%#02x, len=%#02x, seq=%#02x\n", index,
266 *recv_len = TPM_RESPONSE_HEADER_LENGTH + sizeof(uint32_t) +
268 memset(recvbuf, '\0', *recv_len);
269 put_unaligned_be32(length, recvbuf +
270 TPM_RESPONSE_HEADER_LENGTH);
/* Kernel rollback space: synthesise a v2 record with a valid CRC8 */
271 if (seq == NV_SEQ_KERNEL) {
272 struct rollback_space_kernel rsk;
274 data = recvbuf + TPM_RESPONSE_HEADER_LENGTH +
276 memset(&rsk, 0, sizeof(struct rollback_space_kernel));
277 rsk.struct_version = 2;
278 rsk.uid = ROLLBACK_SPACE_KERNEL_UID;
279 rsk.crc8 = crc8(0, (unsigned char *)&rsk,
280 offsetof(struct rollback_space_kernel,
282 memcpy(data, &rsk, sizeof(rsk));
/* Reading a space that was never written fails with TPM_BADINDEX */
283 } else if (!tpm->nvdata[seq].present) {
284 put_unaligned_be32(TPM_BADINDEX, recvbuf +
285 sizeof(uint16_t) + sizeof(uint32_t));
287 memcpy(recvbuf + TPM_RESPONSE_HEADER_LENGTH +
288 sizeof(uint32_t), &tpm->nvdata[seq].data,
294 memset(recvbuf, '\0', *recv_len);
/* Commands we accept unconditionally: reply with a zeroed response */
296 case TPM_CMD_NV_DEFINE_SPACE:
297 case 0x15: /* pcr read */
298 case 0x5d: /* force clear */
299 case 0x6f: /* physical enable */
300 case 0x72: /* physical set deactivated */
301 case 0x99: /* startup */
302 case 0x50: /* self test full */
303 case 0x4000000a: /* assert physical presence */
305 memset(recvbuf, '\0', *recv_len);
308 printf("Unknown tpm command %02x\n", code);
312 printf("tpm: rx recv_len %zd\n", *recv_len);
313 print_buffer(0, recvbuf, 1, *recv_len, 0);
/* Write a short human-readable description of this TPM into @buf */
319 static int sandbox_tpm_get_desc(struct udevice *dev, char *buf, int size)
324 return snprintf(buf, size, "sandbox TPM");
/*
 * Copy the global state (possibly restored from the sandbox state file)
 * into this device's private data when the device is probed.
 */
327 static int sandbox_tpm_probe(struct udevice *dev)
329 struct tpm_state *tpm = dev_get_priv(dev);
331 memcpy(tpm, &g_state, sizeof(*tpm));
/* Open hook for the TPM uclass (body elided in this excerpt) */
336 static int sandbox_tpm_open(struct udevice *dev)
/* Close hook for the TPM uclass (body elided in this excerpt) */
341 static int sandbox_tpm_close(struct udevice *dev)
/* Operation table wiring the sandbox implementations into the TPM uclass */
346 static const struct tpm_ops sandbox_tpm_ops = {
347 .open = sandbox_tpm_open,
348 .close = sandbox_tpm_close,
349 .get_desc = sandbox_tpm_get_desc,
350 .xfer = sandbox_tpm_xfer,
/* Device-tree compatible strings this driver binds to */
353 static const struct udevice_id sandbox_tpm_ids[] = {
354 { .compatible = "google,sandbox-tpm" },
/* Driver-model declaration for the sandbox TPM emulator */
358 U_BOOT_DRIVER(sandbox_tpm) = {
359 .name = "sandbox_tpm",
361 .of_match = sandbox_tpm_ids,
362 .ops = &sandbox_tpm_ops,
363 .probe = sandbox_tpm_probe,
/* Per-device private data is allocated automatically before probe */
364 .priv_auto_alloc_size = sizeof(struct tpm_state),