// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/types.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_cmd.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_reg.h"
#include "ipa_table.h"

/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and use a single GSI TRE. Each immediate command
 * has a well-defined format, having a payload of a known length. This
 * allows the transfer element's length field to be used to hold an
 * immediate command's opcode. The payload for a command resides in AP
 * memory and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback, and are
 * always issued using gsi_trans_commit_wait().
 */
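
/* A sketch of how the helpers below combine (the real call sites live
 * in other files of this driver):
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, ipa_cmd_pipeline_clear_count());
 *	if (trans) {
 *		ipa_cmd_pipeline_clear_add(trans);
 *		gsi_trans_commit_wait(trans);
 *		ipa_cmd_pipeline_clear_wait(ipa);
 *	}
 */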

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps	= 0x0,
	pipeline_clear_src_grp	= 0x1,
	pipeline_clear_full	= 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK		GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK		GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK		GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK		GENMASK_ULL(55, 40)
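
/* Laid out, the masks above carve up the 64-bit flags word like so
 * (bits 56-63 are unused):
 *   bits  0-11: hashed table size
 *   bits 12-27: hashed table offset
 *   bits 28-39: non-hashed table size
 *   bits 40-55: non-hashed table offset
 */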

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK	GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK	GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;	/* Full 8 bits used for IPA v5.0+ */
	u8 reserved[7];
};

/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read;	/* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
	/* The size of a filter table needs to fit into fields in the
	 * ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size. IPv4 and IPv6 filter tables have the same number
	 * of entries.
	 */
	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Prior to IPA v5.0, we supported no more than 32 endpoints,
	 * and this was reflected in some 5-bit fields that held
	 * endpoint numbers. Starting with IPA v5.0, the widths of
	 * these fields were extended to 8 bits, meaning up to 256
	 * endpoints. If the driver claims to support more than
	 * that it's an error.
	 */
	BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}

/* Validate a memory region holding a table */
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
			      bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = ipa->dev;
	u32 size;

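	/* A filter table holds one slot per filtering endpoint, plus one
	 * extra slot for the endpoint bitmap (hence the "+ 1").
	 */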
	size = route ? ipa->route_count : ipa->filter_count + 1;
	size *= sizeof(__le64);

	/* Size must fit in the immediate command field that holds it */
	if (size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, "    (0x%04x > 0x%04x)\n", size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct ipa_mem *mem;
	u32 offset_max;
	u32 size_max;
	u32 offset;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
	 * the header table memory area in an immediate command. Make sure
	 * the offset and size fit in the fields that need to hold them, and
	 * that the entire range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

	/* The header memory area contains both the modem and AP header
	 * regions. The modem portion defines the address of the region.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;

	/* Make sure the offset fits in the IPA command */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, offset, offset_max);

		return false;
	}

	/* Add the size of the AP portion (if defined) to the combined size */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	/* Make sure the combined size fits in the IPA command */
	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = ipa->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA. A 16 bit offset is always supported,
	 * but starting with IPA v4.0 some additional high-order bits are
	 * allowed.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version >= IPA_VERSION_4_0)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);
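	/* For example: prior to IPA v4.0, bit_count is 16 and offset_max
	 * is 0xffff; for IPA v4.0+ the four extra high-order bits make a
	 * 20 bit offset, so offset_max is 0xfffff.
	 */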

	/* Make sure the offset can be represented by the field(s)
	 * that holds it. Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const struct reg *reg;
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa_table_hash_support(ipa)) {
		if (ipa->version < IPA_VERSION_5_0)
			reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
		else
			reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);

		offset = reg_offset(reg);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register. If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command. Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	reg = ipa_reg(ipa, ENDP_STATUS);
	offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	/* Command payloads are allocated one at a time, but a single
	 * transaction can require up to the maximum supported by the
	 * channel; treat them as if they were allocated all at once.
	 */
	return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				       sizeof(union ipa_cmd_payload),
				       tre_max, channel->trans_tre_max);
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside. The content of the buffer provided is
	 * also written via DMA into that space. The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	/* IPA v4.0+ represents the pipeline clear options in the opcode. It
	 * also supports a larger offset by encoding additional high-order
	 * bits in the payload flags field.
	 */
	if (ipa->version >= IPA_VERSION_4_0) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode those high-order bits into the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */
	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	if (ipa->version < IPA_VERSION_5_0) {
		payload->dest_endpoint =
			u8_encode_bits(endpoint_id,
				       IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
	} else {
		payload->dest_endpoint = endpoint_id;
	}

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0. It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction: 0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear: 0 = wait for pipeline clear (don't skip)
	 *   clear_options: 0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode. But again,
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	/* This will complete when the transfer is received */
	reinit_completion(&ipa->completion);

	/* Issue a no-op register write command (mask 0 means no write) */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);

	/* Send a data packet through the IPA pipeline. The packet_init
	 * command says to send the next packet directly to the exception
	 * endpoint without any other IPA processing. The tag_status
	 * command requests that status be generated on completion of
	 * that transfer, and that it will be tagged with a value.
	 * Finally, the transfer command sends a small packet of data
	 * (instead of a command) using the command endpoint.
	 */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans);
	ipa_cmd_transfer_add(trans);
}

/* Returns the number of commands required to clear the pipeline */
u32 ipa_cmd_pipeline_clear_count(void)
{
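	/* ipa_cmd_pipeline_clear_add() adds four commands: a register
	 * write, a packet_init, a tag_status, and a small data transfer.
	 */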
	return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
	wait_for_completion(&ipa->completion);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;

	if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
		return NULL;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
				       tre_count, DMA_NONE);
}

/* Init function for immediate commands; there is no ipa_cmd_exit() */
int ipa_cmd_init(struct ipa *ipa)
{
	ipa_cmd_validate_build();

	if (!ipa_cmd_header_init_local_valid(ipa))
		return -EINVAL;

	if (!ipa_cmd_register_write_valid(ipa))
		return -EINVAL;

	return 0;
}