author    Marcin Wojtas <mw@FreeBSD.org>  2020-05-26 15:04:49 +0000
committer Marcin Wojtas <mw@FreeBSD.org>  2020-05-26 15:04:49 +0000
commit    73cf51936f0f8f2a5661bf98d34521a7bf8feebd (patch)
tree      686e232e23d0940262c4afd17f48c0710935a2fe
parent    b6ee6cf7ab6368a44c82ae4e5ec591421b7a12df (diff)
download  src-test2-73cf51936f0f8f2a5661bf98d34521a7bf8feebd.tar.gz
          src-test2-73cf51936f0f8f2a5661bf98d34521a7bf8feebd.zip
-rw-r--r--  ena_com.c                    500
-rw-r--r--  ena_com.h                    285
-rw-r--r--  ena_defs/ena_admin_defs.h    106
-rw-r--r--  ena_defs/ena_common_defs.h     4
-rw-r--r--  ena_defs/ena_eth_io_defs.h     8
-rw-r--r--  ena_defs/ena_gen_info.h        6
-rw-r--r--  ena_defs/ena_regs_defs.h       5
-rw-r--r--  ena_eth_com.c                112
-rw-r--r--  ena_eth_com.h                 34
-rw-r--r--  ena_plat.h                    48
10 files changed, 526 insertions, 582 deletions
diff --git a/ena_com.c b/ena_com.c
index 76a2f06a7fce..dde0c3357f63 100644
--- a/ena_com.c
+++ b/ena_com.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,7 +70,9 @@
#define ENA_REGS_ADMIN_INTR_MASK 1
-#define ENA_POLL_MS 5
+#define ENA_MIN_POLL_US 100
+
+#define ENA_MAX_POLL_US 5000
/*****************************************************************************/
/*****************************************************************************/
@@ -99,7 +101,7 @@ struct ena_com_stats_ctx {
struct ena_admin_acq_get_stats_resp get_resp;
};
-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
struct ena_common_mem_addr *ena_addr,
dma_addr_t addr)
{
@@ -200,7 +202,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
return 0;
}
-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+static void comp_ctxt_release(struct ena_com_admin_queue *queue,
struct ena_comp_ctx *comp_ctx)
{
comp_ctx->occupied = false;
@@ -216,6 +218,11 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
return NULL;
}
+ if (unlikely(!queue->comp_ctx)) {
+ ena_trc_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
ena_trc_err("Completion context is occupied\n");
return NULL;
@@ -289,7 +296,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
return comp_ctx;
}
-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
@@ -409,6 +416,8 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
0x0, io_sq->llq_info.desc_list_entry_size);
io_sq->llq_buf_ctrl.descs_left_in_line =
io_sq->llq_info.descs_num_before_header;
+ io_sq->disable_meta_caching =
+ io_sq->llq_info.disable_meta_caching;
if (io_sq->llq_info.max_entries_in_tx_burst > 0)
io_sq->entries_in_tx_burst_left =
@@ -534,12 +543,9 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
if (unlikely(comp_status != 0))
ena_trc_err("admin command failed[%u]\n", comp_status);
- if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
- return ENA_COM_INVAL;
-
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
- return 0;
+ return ENA_COM_OK;
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return ENA_COM_NO_MEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE:
@@ -551,24 +557,32 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
return ENA_COM_INVAL;
}
- return 0;
+ return ENA_COM_INVAL;
+}
+
+static inline void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+{
+ delay_us = ENA_MAX32(ENA_MIN_POLL_US, delay_us);
+ delay_us = ENA_MIN32(delay_us * (1 << exp), ENA_MAX_POLL_US);
+ ENA_USLEEP(delay_us);
}
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
struct ena_com_admin_queue *admin_queue)
{
unsigned long flags = 0;
- unsigned long timeout;
+ ena_time_t timeout;
int ret;
+ u32 exp = 0;
timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);
while (1) {
- ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
- ena_com_handle_admin_completion(admin_queue);
- ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
+ ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
+ ena_com_handle_admin_completion(admin_queue);
+ ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- if (comp_ctx->status != ENA_CMD_SUBMITTED)
+ if (comp_ctx->status != ENA_CMD_SUBMITTED)
break;
if (ENA_TIME_EXPIRE(timeout)) {
@@ -583,7 +597,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
goto err;
}
- ENA_MSLEEP(ENA_POLL_MS);
+ ena_delay_exponential_backoff_us(exp++, admin_queue->ena_dev->ena_min_poll_delay_us);
}
if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
@@ -629,6 +643,14 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+ if (llq_info->disable_meta_caching)
+ cmd.u.llq.accel_mode.u.set.enabled_flags |=
+ BIT(ENA_ADMIN_DISABLE_META_CACHING);
+
+ if (llq_info->max_entries_in_tx_burst)
+ cmd.u.llq.accel_mode.u.set.enabled_flags |=
+ BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
ret = ena_com_execute_admin_command(admin_queue,
(struct ena_admin_aq_entry *)&cmd,
sizeof(cmd),
@@ -748,15 +770,21 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
supported_feat,
llq_info->descs_num_before_header);
}
+ /* Check for accelerated queue supported */
+ llq_info->disable_meta_caching =
+ llq_features->accel_mode.u.get.supported_flags &
+ BIT(ENA_ADMIN_DISABLE_META_CACHING);
- llq_info->max_entries_in_tx_burst =
- (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
+ if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+ llq_info->max_entries_in_tx_burst =
+ llq_features->accel_mode.u.get.max_tx_burst_size /
+ llq_default_cfg->llq_ring_entry_size_value;
rc = ena_com_set_llq(ena_dev);
if (rc)
ena_trc_err("Cannot set LLQ configuration: %d\n", rc);
- return 0;
+ return rc;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
@@ -779,16 +807,25 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
admin_queue->stats.no_completion++;
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- if (comp_ctx->status == ENA_CMD_COMPLETED)
- ena_trc_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
- comp_ctx->cmd_opcode);
- else
- ena_trc_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+ if (comp_ctx->status == ENA_CMD_COMPLETED) {
+ ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+ /* Check if fallback to polling is enabled */
+ if (admin_queue->auto_polling)
+ admin_queue->polling = true;
+ } else {
+ ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
comp_ctx->cmd_opcode, comp_ctx->status);
-
- admin_queue->running_state = false;
- ret = ENA_COM_TIMER_EXPIRED;
- goto err;
+ }
+ /* Check if shifted to polling mode.
+ * This will happen if there is a completion without an interrupt
+ * and autopolling mode is enabled. Continuing normal execution in such case
+ */
+ if (!admin_queue->polling) {
+ admin_queue->running_state = false;
+ ret = ENA_COM_TIMER_EXPIRED;
+ goto err;
+ }
}
ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
@@ -944,7 +981,9 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
}
if (io_sq->bounce_buf_ctrl.base_buffer) {
- ENA_MEM_FREE(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+ ENA_MEM_FREE(ena_dev->dmadev,
+ io_sq->bounce_buf_ctrl.base_buffer,
+ (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
io_sq->bounce_buf_ctrl.base_buffer = NULL;
}
}
@@ -952,12 +991,13 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
u16 exp_state)
{
- u32 val, i;
+ u32 val, exp = 0;
+ ena_time_t timeout_stamp;
- /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
- timeout = (timeout * 100) / ENA_POLL_MS;
+ /* Convert timeout from resolution of 100ms to us resolution. */
+ timeout_stamp = ENA_GET_SYSTEM_TIMEOUT(100 * 1000 * timeout);
- for (i = 0; i < timeout; i++) {
+ while (1) {
val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
@@ -969,10 +1009,11 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
exp_state)
return 0;
- ENA_MSLEEP(ENA_POLL_MS);
- }
+ if (ENA_TIME_EXPIRE(timeout_stamp))
+ return ENA_COM_TIMER_EXPIRED;
- return ENA_COM_TIMER_EXPIRED;
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+ }
}
static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
@@ -1055,10 +1096,34 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
feature_ver);
}
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->rss.hash_func;
+}
+
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ (ena_dev->rss).hash_key;
+
+ ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));
+ /* The key buffer is stored in the device in an array of
+ * uint32 elements. Therefore the number of elements can be derived
+ * by dividing the buffer length by the size of each array element.
+ * In current implementation each element is sized at uint32_t
+ * so it's actually a division by 4 but if the element size changes,
+ * there is no need to rewrite this code.
+ */
+ hash_key->keys_num = sizeof(hash_key->key) / sizeof(hash_key->key[0]);
+}
+
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+ return ENA_COM_UNSUPPORTED;
+
ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
sizeof(*rss->hash_key),
rss->hash_key,
@@ -1186,7 +1251,9 @@ static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
rss->rss_ind_tbl = NULL;
if (rss->host_rss_ind_tbl)
- ENA_MEM_FREE(ena_dev->dmadev, rss->host_rss_ind_tbl);
+ ENA_MEM_FREE(ena_dev->dmadev,
+ rss->host_rss_ind_tbl,
+ ((1ULL << rss->tbl_log_size) * sizeof(u16)));
rss->host_rss_ind_tbl = NULL;
}
@@ -1287,63 +1354,29 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
return 0;
}
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
- struct ena_rss *rss = &ena_dev->rss;
- u8 idx;
- u16 i;
-
- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
- if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
- return ENA_COM_INVAL;
- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
- if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
- return ENA_COM_INVAL;
-
- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
- }
-
- return 0;
-}
-
-static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
-{
- size_t size;
-
- size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
-
- ena_dev->intr_moder_tbl = ENA_MEM_ALLOC(ena_dev->dmadev, size);
- if (!ena_dev->intr_moder_tbl)
- return ENA_COM_NO_MEM;
-
- ena_com_config_default_interrupt_moderation_table(ena_dev);
-
- return 0;
-}
-
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 intr_delay_resolution)
{
- struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
- unsigned int i;
+ u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;
- if (!intr_delay_resolution) {
+ if (unlikely(!intr_delay_resolution)) {
ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
- intr_delay_resolution = 1;
+ intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
}
- ena_dev->intr_delay_resolution = intr_delay_resolution;
/* update Rx */
- for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
- intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+ ena_dev->intr_moder_rx_interval =
+ ena_dev->intr_moder_rx_interval *
+ prev_intr_delay_resolution /
+ intr_delay_resolution;
/* update Tx */
- ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+ ena_dev->intr_moder_tx_interval =
+ ena_dev->intr_moder_tx_interval *
+ prev_intr_delay_resolution /
+ intr_delay_resolution;
+
+ ena_dev->intr_delay_resolution = intr_delay_resolution;
}
/*****************************************************************************/
@@ -1482,11 +1515,12 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
unsigned long flags = 0;
+ u32 exp = 0;
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
- ENA_MSLEEP(ENA_POLL_MS);
+ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
}
ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
@@ -1667,7 +1701,9 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
if (admin_queue->comp_ctx)
- ENA_MEM_FREE(ena_dev->dmadev, admin_queue->comp_ctx);
+ ENA_MEM_FREE(ena_dev->dmadev,
+ admin_queue->comp_ctx,
+ (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
admin_queue->comp_ctx = NULL;
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
@@ -1701,6 +1737,17 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
ena_dev->admin_queue.polling = polling;
}
+bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->admin_queue.polling;
+}
+
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+ bool polling)
+{
+ ena_dev->admin_queue.auto_polling = polling;
+}
+
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
@@ -1838,6 +1885,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
if (ret)
goto error;
+ admin_queue->ena_dev = ena_dev;
admin_queue->running_state = true;
return 0;
@@ -1934,62 +1982,6 @@ int ena_com_get_link_params(struct ena_com_dev *ena_dev,
return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
}
-int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev)
-{
- struct ena_admin_get_feat_resp resp;
- struct ena_extra_properties_strings *extra_properties_strings =
- &ena_dev->extra_properties_strings;
- u32 rc;
- extra_properties_strings->size = ENA_ADMIN_EXTRA_PROPERTIES_COUNT *
- ENA_ADMIN_EXTRA_PROPERTIES_STRING_LEN;
-
- ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
- extra_properties_strings->size,
- extra_properties_strings->virt_addr,
- extra_properties_strings->dma_addr,
- extra_properties_strings->dma_handle);
- if (unlikely(!extra_properties_strings->virt_addr)) {
- ena_trc_err("Failed to allocate extra properties strings\n");
- return 0;
- }
-
- rc = ena_com_get_feature_ex(ena_dev, &resp,
- ENA_ADMIN_EXTRA_PROPERTIES_STRINGS,
- extra_properties_strings->dma_addr,
- extra_properties_strings->size, 0);
- if (rc) {
- ena_trc_dbg("Failed to get extra properties strings\n");
- goto err;
- }
-
- return resp.u.extra_properties_strings.count;
-err:
- ena_com_delete_extra_properties_strings(ena_dev);
- return 0;
-}
-
-void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev)
-{
- struct ena_extra_properties_strings *extra_properties_strings =
- &ena_dev->extra_properties_strings;
-
- if (extra_properties_strings->virt_addr) {
- ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
- extra_properties_strings->size,
- extra_properties_strings->virt_addr,
- extra_properties_strings->dma_addr,
- extra_properties_strings->dma_handle);
- extra_properties_strings->virt_addr = NULL;
- }
-}
-
-int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
- struct ena_admin_get_feat_resp *resp)
-{
- return ena_com_get_feature(ena_dev, resp,
- ENA_ADMIN_EXTRA_PROPERTIES_FLAGS, 0);
-}
-
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
@@ -2111,7 +2103,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
struct ena_com_aenq *aenq = &dev->aenq;
- unsigned long long timestamp;
+ u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
u8 phase;
@@ -2129,9 +2121,9 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
*/
dma_rmb();
- timestamp = (unsigned long long)aenq_common->timestamp_low |
- ((unsigned long long)aenq_common->timestamp_high << 32);
- ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
+ timestamp = (u64)aenq_common->timestamp_low |
+ ((u64)aenq_common->timestamp_high << 32);
+ ena_trc_dbg("AENQ! Group[%x] Syndrom[%x] timestamp: [%" ENA_PRIu64 "s]\n",
aenq_common->group,
aenq_common->syndrom,
timestamp);
@@ -2452,12 +2444,14 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
enum ena_admin_hash_functions func,
const u8 *key, u16 key_len, u32 init_val)
{
- struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
+ enum ena_admin_hash_functions old_func;
+ struct ena_rss *rss = &ena_dev->rss;
int rc;
+ hash_key = rss->hash_key;
+
/* Make sure size is a mult of DWs */
if (unlikely(key_len & 0x3))
return ENA_COM_INVAL;
@@ -2469,22 +2463,23 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
+ if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
ena_trc_err("Flow hash function %d isn't supported\n", func);
return ENA_COM_UNSUPPORTED;
}
switch (func) {
case ENA_ADMIN_TOEPLITZ:
- if (key_len > sizeof(hash_key->key)) {
- ena_trc_err("key len (%hu) is bigger than the max supported (%zu)\n",
- key_len, sizeof(hash_key->key));
- return ENA_COM_INVAL;
+ if (key) {
+ if (key_len != sizeof(hash_key->key)) {
+ ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return ENA_COM_INVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len / sizeof(hash_key->key[0]);
}
-
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
break;
case ENA_ADMIN_CRC32:
rss->hash_init_val = init_val;
@@ -2494,26 +2489,27 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return ENA_COM_INVAL;
}
+ old_func = rss->hash_func;
rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
if (unlikely(rc))
- ena_com_get_hash_function(ena_dev, NULL, NULL);
+ rss->hash_func = old_func;
return rc;
}
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key)
+ enum ena_admin_hash_functions *func)
{
struct ena_rss *rss = &ena_dev->rss;
struct ena_admin_get_feat_resp get_resp;
- struct ena_admin_feature_rss_flow_hash_control *hash_key =
- rss->hash_key;
int rc;
+ if (unlikely(!func))
+ return ENA_COM_INVAL;
+
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
ENA_ADMIN_RSS_HASH_FUNCTION,
rss->hash_key_dma_addr,
@@ -2521,9 +2517,20 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
- if (func)
- *func = rss->hash_func;
+ /* ENA_FFS() returns 1 in case the lsb is set */
+ rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
+ if (rss->hash_func)
+ rss->hash_func--;
+
+ *func = rss->hash_func;
+
+ return 0;
+}
+
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ ena_dev->rss.hash_key;
if (key)
memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
@@ -2785,10 +2792,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
if (!ind_tbl)
return 0;
- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
- if (unlikely(rc))
- return rc;
-
for (i = 0; i < (1 << rss->tbl_log_size); i++)
ind_tbl[i] = rss->host_rss_ind_tbl[i];
@@ -2805,8 +2808,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
if (unlikely(rc))
goto err_indr_tbl;
+ /* The following function might return unsupported in case the
+ * device doesn't support setting the key / hash function. We can safely
+ * ignore this error and have indirection table support only.
+ */
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc))
+ if (likely(!rc))
+ ena_com_hash_key_fill_default_key(ena_dev);
+ else if (rc != ENA_COM_UNSUPPORTED)
goto err_hash_key;
rc = ena_com_hash_ctrl_init(ena_dev);
@@ -2956,42 +2965,35 @@ bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION);
}
-int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
- u32 tx_coalesce_usecs)
+static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
+ u32 intr_delay_resolution,
+ u32 *intr_moder_interval)
{
- if (!ena_dev->intr_delay_resolution) {
+ if (!intr_delay_resolution) {
ena_trc_err("Illegal interrupt delay granularity value\n");
return ENA_COM_FAULT;
}
- ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
- ena_dev->intr_delay_resolution;
+ *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
return 0;
}
-int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
- u32 rx_coalesce_usecs)
-{
- if (!ena_dev->intr_delay_resolution) {
- ena_trc_err("Illegal interrupt delay granularity value\n");
- return ENA_COM_FAULT;
- }
-
- /* We use LOWEST entry of moderation table for storing
- * nonadaptive interrupt coalescing values
- */
- ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
- rx_coalesce_usecs / ena_dev->intr_delay_resolution;
- return 0;
+int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
+ u32 tx_coalesce_usecs)
+{
+ return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
+ ena_dev->intr_delay_resolution,
+ &ena_dev->intr_moder_tx_interval);
}
-void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
+int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
+ u32 rx_coalesce_usecs)
{
- if (ena_dev->intr_moder_tbl)
- ENA_MEM_FREE(ena_dev->dmadev, ena_dev->intr_moder_tbl);
- ena_dev->intr_moder_tbl = NULL;
+ return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
+ ena_dev->intr_delay_resolution,
+ &ena_dev->intr_moder_rx_interval);
}
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
@@ -3018,62 +3020,14 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
return rc;
}
- rc = ena_com_init_interrupt_moderation_table(ena_dev);
- if (rc)
- goto err;
-
/* if moderation is supported by device we set adaptive moderation */
delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
- ena_com_enable_adaptive_moderation(ena_dev);
- return 0;
-err:
- ena_com_destroy_interrupt_moderation(ena_dev);
- return rc;
-}
+ /* Disable adaptive moderation by default - can be enabled later */
+ ena_com_disable_adaptive_moderation(ena_dev);
-void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
-{
- struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
- if (!intr_moder_tbl)
- return;
-
- intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
- ENA_INTR_LOWEST_USECS;
- intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
- ENA_INTR_LOWEST_PKTS;
- intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
- ENA_INTR_LOWEST_BYTES;
-
- intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
- ENA_INTR_LOW_USECS;
- intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
- ENA_INTR_LOW_PKTS;
- intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
- ENA_INTR_LOW_BYTES;
-
- intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
- ENA_INTR_MID_USECS;
- intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
- ENA_INTR_MID_PKTS;
- intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
- ENA_INTR_MID_BYTES;
-
- intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
- ENA_INTR_HIGH_USECS;
- intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
- ENA_INTR_HIGH_PKTS;
- intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
- ENA_INTR_HIGH_BYTES;
-
- intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
- ENA_INTR_HIGHEST_USECS;
- intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
- ENA_INTR_HIGHEST_PKTS;
- intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
- ENA_INTR_HIGHEST_BYTES;
+ return 0;
}
unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
@@ -3083,57 +3037,15 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
- struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
- if (intr_moder_tbl)
- return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
-
- return 0;
-}
-
-void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
- enum ena_intr_moder_level level,
- struct ena_intr_moder_entry *entry)
-{
- struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
- if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
- return;
-
- intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
- if (ena_dev->intr_delay_resolution)
- intr_moder_tbl[level].intr_moder_interval /=
- ena_dev->intr_delay_resolution;
- intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
-
- /* use hardcoded value until ethtool supports bytecount parameter */
- if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
- intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
-}
-
-void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
- enum ena_intr_moder_level level,
- struct ena_intr_moder_entry *entry)
-{
- struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-
- if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
- return;
-
- entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
- if (ena_dev->intr_delay_resolution)
- entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
- entry->pkts_per_interval =
- intr_moder_tbl[level].pkts_per_interval;
- entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
+ return ena_dev->intr_moder_rx_interval;
}
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
struct ena_admin_feature_llq_desc *llq_features,
struct ena_llq_configurations *llq_default_cfg)
{
+ struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
int rc;
- int size;
if (!llq_features->max_llq_num) {
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -3144,14 +3056,12 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- /* Validate the descriptor is not too big */
- size = ena_dev->tx_max_header_size;
- size += ena_dev->llq_info.descs_num_before_header *
- sizeof(struct ena_eth_io_tx_desc);
+ ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
+ (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
- if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+ if (unlikely(ena_dev->tx_max_header_size == 0)) {
ena_trc_err("the size of the LLQ entry is smaller than needed\n");
- return ENA_COM_INVAL;
+ return -EINVAL;
}
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
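
The ena_com.c changes above replace the fixed 5 ms admin polling sleep with a clamped exponential backoff bounded by ENA_MIN_POLL_US and ENA_MAX_POLL_US. A minimal standalone sketch of that behavior follows, assuming plain libc usleep() stands in for the ENA_USLEEP platform macro and the MIN/MAX constants mirror the new defines:

#include <unistd.h>

#define MIN_POLL_US  100U   /* mirrors ENA_MIN_POLL_US */
#define MAX_POLL_US 5000U   /* mirrors ENA_MAX_POLL_US */

/* Sleep for min_delay_us scaled by 2^exp, clamped to [MIN_POLL_US, MAX_POLL_US]. */
static void delay_exponential_backoff_us(unsigned int exp, unsigned int min_delay_us)
{
	unsigned int delay_us = min_delay_us;

	if (delay_us < MIN_POLL_US)
		delay_us = MIN_POLL_US;
	if (delay_us > MAX_POLL_US)
		delay_us = MAX_POLL_US;
	if (exp > 12)                     /* keep the shift well-defined */
		exp = 12;
	delay_us *= 1U << exp;            /* 100, 200, 400, ... for exp = 0, 1, 2, ... */
	if (delay_us > MAX_POLL_US)
		delay_us = MAX_POLL_US;
	usleep(delay_us);
}

Each polling loop in the diff passes an incrementing exp, so the wait starts at ena_min_poll_delay_us (at least 100 us) and doubles until it saturates at 5 ms, instead of sleeping a fixed ENA_MSLEEP(5) per iteration.
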
diff --git a/ena_com.h b/ena_com.h
index 922f5d5d583d..c1b9540edd0b 100644
--- a/ena_com.h
+++ b/ena_com.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -36,9 +36,9 @@
#include "ena_plat.h"
-#define ENA_MAX_NUM_IO_QUEUES 128U
+#define ENA_MAX_NUM_IO_QUEUES 128U
/* We need to queues for each IO (on for Tx and one for Rx) */
-#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
+#define ENA_TOTAL_NUM_QUEUES (2 * (ENA_MAX_NUM_IO_QUEUES))
#define ENA_MAX_HANDLERS 256
@@ -55,45 +55,15 @@
/*****************************************************************************/
/* ENA adaptive interrupt moderation settings */
-#define ENA_INTR_LOWEST_USECS (0)
-#define ENA_INTR_LOWEST_PKTS (3)
-#define ENA_INTR_LOWEST_BYTES (2 * 1524)
+#define ENA_INTR_INITIAL_TX_INTERVAL_USECS ENA_INTR_INITIAL_TX_INTERVAL_USECS_PLAT
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
-#define ENA_INTR_LOW_USECS (32)
-#define ENA_INTR_LOW_PKTS (12)
-#define ENA_INTR_LOW_BYTES (16 * 1024)
+#define ENA_HASH_KEY_SIZE 40
-#define ENA_INTR_MID_USECS (80)
-#define ENA_INTR_MID_PKTS (48)
-#define ENA_INTR_MID_BYTES (64 * 1024)
+#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
-#define ENA_INTR_HIGH_USECS (128)
-#define ENA_INTR_HIGH_PKTS (96)
-#define ENA_INTR_HIGH_BYTES (128 * 1024)
-
-#define ENA_INTR_HIGHEST_USECS (192)
-#define ENA_INTR_HIGHEST_PKTS (128)
-#define ENA_INTR_HIGHEST_BYTES (192 * 1024)
-
-#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 196
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 4
-#define ENA_INTR_DELAY_OLD_VALUE_WEIGHT 6
-#define ENA_INTR_DELAY_NEW_VALUE_WEIGHT 4
-#define ENA_INTR_MODER_LEVEL_STRIDE 1
-#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
-
-#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
-
-#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
-
-enum ena_intr_moder_level {
- ENA_INTR_MODER_LOWEST = 0,
- ENA_INTR_MODER_LOW,
- ENA_INTR_MODER_MID,
- ENA_INTR_MODER_HIGH,
- ENA_INTR_MODER_HIGHEST,
- ENA_INTR_MAX_NUM_OF_LEVELS,
-};
+#define ENA_FEATURE_MAX_QUEUE_EXT_VER 1
struct ena_llq_configurations {
enum ena_admin_llq_header_location llq_header_location;
@@ -103,12 +73,6 @@ struct ena_llq_configurations {
u16 llq_ring_entry_size_value;
};
-struct ena_intr_moder_entry {
- unsigned int intr_moder_interval;
- unsigned int pkts_per_interval;
- unsigned int bytes_per_interval;
-};
-
enum queue_direction {
ENA_COM_IO_QUEUE_DIRECTION_TX,
ENA_COM_IO_QUEUE_DIRECTION_RX
@@ -146,6 +110,7 @@ struct ena_com_llq_info {
u16 descs_num_before_header;
u16 descs_per_entry;
u16 max_entries_in_tx_burst;
+ bool disable_meta_caching;
};
struct ena_com_io_cq {
@@ -210,6 +175,8 @@ struct ena_com_io_sq {
enum queue_direction direction;
enum ena_admin_placement_policy_type mem_queue_type;
+ bool disable_meta_caching;
+
u32 msix_vector;
struct ena_com_tx_meta cached_tx_meta;
struct ena_com_llq_info llq_info;
@@ -253,16 +220,17 @@ struct ena_com_admin_sq {
};
struct ena_com_stats_admin {
- u32 aborted_cmd;
- u32 submitted_cmd;
- u32 completed_cmd;
- u32 out_of_space;
- u32 no_completion;
+ u64 aborted_cmd;
+ u64 submitted_cmd;
+ u64 completed_cmd;
+ u64 out_of_space;
+ u64 no_completion;
};
struct ena_com_admin_queue {
void *q_dmadev;
void *bus;
+ struct ena_com_dev *ena_dev;
ena_spinlock_t q_lock; /* spinlock for the admin queue */
struct ena_comp_ctx *comp_ctx;
@@ -274,6 +242,9 @@ struct ena_com_admin_queue {
/* Indicate if the admin queue should poll for completion */
bool polling;
+ /* Define if fallback to polling mode should occur */
+ bool auto_polling;
+
u16 curr_cmd_id;
/* Indicate that the ena was initialized and can
@@ -345,13 +316,6 @@ struct ena_host_attribute {
ena_mem_handle_t host_info_dma_handle;
};
-struct ena_extra_properties_strings {
- u8 *virt_addr;
- dma_addr_t dma_addr;
- ena_mem_handle_t dma_handle;
- u32 size;
-};
-
/* Each ena_dev is a PCI function. */
struct ena_com_dev {
struct ena_com_admin_queue admin_queue;
@@ -377,11 +341,18 @@ struct ena_com_dev {
struct ena_host_attribute host_attr;
bool adaptive_coalescing;
u16 intr_delay_resolution;
+
+ /* interrupt moderation intervals are in usec divided by
+ * intr_delay_resolution, which is supplied by the device.
+ */
u32 intr_moder_tx_interval;
+ u32 intr_moder_rx_interval;
+
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
- struct ena_extra_properties_strings extra_properties_strings;
+
+ u32 ena_min_poll_delay_us;
};
struct ena_com_dev_get_features_ctx {
@@ -430,7 +401,7 @@ extern "C" {
*/
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev);
-/* ena_com_set_mmio_read_mode - Enable/disable the mmio reg read mechanism
+/* ena_com_set_mmio_read_mode - Enable/disable the indirect mmio reg read mechanism
* @ena_dev: ENA communication layer struct
* @readless_supported: readless mode (enable/disable)
*/
@@ -538,7 +509,7 @@ bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev);
*/
void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
-/* ena_com_set_admin_polling_mode - Get the admin completion queue polling mode
+/* ena_com_get_admin_polling_mode - Get the admin completion queue polling mode
* @ena_dev: ENA communication layer struct
*
* Get the admin completion mode.
@@ -548,12 +519,23 @@ void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling);
*
* @return state
*/
-bool ena_com_get_ena_admin_polling_mode(struct ena_com_dev *ena_dev);
+bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev);
+
+/* ena_com_set_admin_auto_polling_mode - Enable autoswitch to polling mode
+ * @ena_dev: ENA communication layer struct
+ * @polling: Enable/Disable polling mode
+ *
+ * Set the autopolling mode.
+ * If autopolling is on:
+ * In case of missing interrupt when data is available switch to polling.
+ */
+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+ bool polling);
/* ena_com_admin_q_comp_intr_handler - admin queue interrupt handler
* @ena_dev: ENA communication layer struct
*
- * This method go over the admin completion queue and wake up all the pending
+ * This method goes over the admin completion queue and wakes up all the pending
* threads that wait on the commands wait event.
*
* @note: Should be called after MSI-X interrupt.
@@ -563,7 +545,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
/* ena_com_aenq_intr_handler - AENQ interrupt handler
* @ena_dev: ENA communication layer struct
*
- * This method go over the async event notification queue and call the proper
+ * This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
@@ -580,14 +562,14 @@ void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev);
/* ena_com_wait_for_abort_completion - Wait for admin commands abort.
* @ena_dev: ENA communication layer struct
*
- * This method wait until all the outstanding admin commands will be completed.
+ * This method waits until all the outstanding admin commands are completed.
*/
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev);
/* ena_com_validate_version - Validate the device parameters
* @ena_dev: ENA communication layer struct
*
- * This method validate the device parameters are the same as the saved
+ * This method verifies the device parameters are the same as the saved
* parameters in ena_dev.
* This method is useful after device reset, to validate the device mac address
* and the device offloads are the same as before the reset.
@@ -608,31 +590,6 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev);
int ena_com_get_link_params(struct ena_com_dev *ena_dev,
struct ena_admin_get_feat_resp *resp);
-/* ena_com_extra_properties_strings_init - Initialize the extra properties strings buffer.
- * @ena_dev: ENA communication layer struct
- *
- * Initialize the extra properties strings buffer.
- */
-int ena_com_extra_properties_strings_init(struct ena_com_dev *ena_dev);
-
-/* ena_com_delete_extra_properties_strings - Free the extra properties strings buffer.
- * @ena_dev: ENA communication layer struct
- *
- * Free the allocated extra properties strings buffer.
- */
-void ena_com_delete_extra_properties_strings(struct ena_com_dev *ena_dev);
-
-/* ena_com_get_extra_properties_flags - Retrieve extra properties flags.
- * @ena_dev: ENA communication layer struct
- * @resp: Extra properties flags.
- *
- * Retrieve the extra properties flags.
- *
- * @return - 0 on Success negative value otherwise.
- */
-int ena_com_get_extra_properties_flags(struct ena_com_dev *ena_dev,
- struct ena_admin_get_feat_resp *resp);
-
/* ena_com_get_dma_width - Retrieve physical dma address width the device
* supports.
* @ena_dev: ENA communication layer struct
@@ -707,6 +664,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
*/
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+/* ena_com_get_current_hash_function - Get RSS hash function
+ * @ena_dev: ENA communication layer struct
+ *
+ * Return the current hash function.
+ * @return: 0 or one of the ena_admin_hash_functions values.
+ */
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
+
/* ena_com_fill_hash_function - Fill RSS hash function
* @ena_dev: ENA communication layer struct
* @func: The hash function (Toeplitz or crc)
@@ -738,23 +703,32 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
*/
int ena_com_set_hash_function(struct ena_com_dev *ena_dev);
-/* ena_com_get_hash_function - Retrieve the hash function and the hash key
- * from the device.
+/* ena_com_get_hash_function - Retrieve the hash function from the device.
* @ena_dev: ENA communication layer struct
* @func: hash function
- * @key: hash key
*
- * Retrieve the hash function and the hash key from the device.
+ * Retrieve the hash function from the device.
*
- * @note: If the caller called ena_com_fill_hash_function but didn't flash
+ * @note: If the caller called ena_com_fill_hash_function but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
*/
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
- enum ena_admin_hash_functions *func,
- u8 *key);
+ enum ena_admin_hash_functions *func);
+/* ena_com_get_hash_key - Retrieve the hash key
+ * @ena_dev: ENA communication layer struct
+ * @key: hash key
+ *
+ * Retrieve the hash key.
+ *
+ * @note: If the caller called ena_com_fill_hash_key but didn't flush
+ * it to the device, the new configuration will be lost.
+ *
+ * @return: 0 on Success and negative value otherwise.
+ */
+int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key);
/* ena_com_fill_hash_ctrl - Fill RSS hash control
* @ena_dev: ENA communication layer struct.
* @proto: The protocol to configure.
@@ -789,7 +763,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev);
*
* Retrieve the hash control from the device.
*
- * @note, If the caller called ena_com_fill_hash_ctrl but didn't flash
+ * @note: If the caller called ena_com_fill_hash_ctrl but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
@@ -841,7 +815,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev);
*
* Retrieve the RSS indirection table from the device.
*
- * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flash
+ * @note: If the caller called ena_com_indirect_table_fill_entry but didn't flush
* it to the device, the new configuration will be lost.
*
* @return: 0 on Success and negative value otherwise.
@@ -867,14 +841,14 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
/* ena_com_delete_debug_area - Free the debug area resources.
* @ena_dev: ENA communication layer struct
*
- * Free the allocate debug area.
+ * Free the allocated debug area.
*/
void ena_com_delete_debug_area(struct ena_com_dev *ena_dev);
/* ena_com_delete_host_info - Free the host info resources.
* @ena_dev: ENA communication layer struct
*
- * Free the allocate host info.
+ * Free the allocated host info.
*/
void ena_com_delete_host_info(struct ena_com_dev *ena_dev);
@@ -915,9 +889,9 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
* @cmd_completion: command completion return value.
* @cmd_comp_size: command completion size.
- * Submit an admin command and then wait until the device will return a
+ * Submit an admin command and then wait until the device returns a
* completion.
- * The completion will be copyed into cmd_comp.
+ * The completion will be copied into cmd_comp.
*
* @return - 0 on success, negative value on failure.
*/
@@ -934,11 +908,6 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
*/
int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev);
-/* ena_com_destroy_interrupt_moderation - Destroy interrupt moderation resources
- * @ena_dev: ENA communication layer struct
- */
-void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
-
/* ena_com_interrupt_moderation_supported - Return if interrupt moderation
* capability is supported by the device.
*
@@ -946,12 +915,6 @@ void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev);
*/
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev);
-/* ena_com_config_default_interrupt_moderation_table - Restore the interrupt
- * moderation table back to the default parameters.
- * @ena_dev: ENA communication layer struct
- */
-void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev);
-
/* ena_com_update_nonadaptive_moderation_interval_tx - Update the
* non-adaptive interval in Tx direction.
* @ena_dev: ENA communication layer struct
@@ -988,29 +951,6 @@ unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *
*/
unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev);
-/* ena_com_init_intr_moderation_entry - Update a single entry in the interrupt
- * moderation table.
- * @ena_dev: ENA communication layer struct
- * @level: Interrupt moderation table level
- * @entry: Entry value
- *
- * Update a single entry in the interrupt moderation table.
- */
-void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
- enum ena_intr_moder_level level,
- struct ena_intr_moder_entry *entry);
-
-/* ena_com_get_intr_moderation_entry - Init ena_intr_moder_entry.
- * @ena_dev: ENA communication layer struct
- * @level: Interrupt moderation table level
- * @entry: Entry to fill.
- *
- * Initialize the entry according to the adaptive interrupt moderation table.
- */
-void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
- enum ena_intr_moder_level level,
- struct ena_intr_moder_entry *entry);
-
/* ena_com_config_dev_mode - Configure the placement policy of the device.
* @ena_dev: ENA communication layer struct
* @llq_features: LLQ feature descriptor, retrieve via
@@ -1036,80 +976,11 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
ena_dev->adaptive_coalescing = false;
}
-/* ena_com_calculate_interrupt_delay - Calculate new interrupt delay
- * @ena_dev: ENA communication layer struct
- * @pkts: Number of packets since the last update
- * @bytes: Number of bytes received since the last update.
- * @smoothed_interval: Returned interval
- * @moder_tbl_idx: Current table level as input update new level as return
- * value.
- */
-static inline void ena_com_calculate_interrupt_delay(struct ena_com_dev *ena_dev,
- unsigned int pkts,
- unsigned int bytes,
- unsigned int *smoothed_interval,
- unsigned int *moder_tbl_idx)
-{
- enum ena_intr_moder_level curr_moder_idx, new_moder_idx;
- struct ena_intr_moder_entry *curr_moder_entry;
- struct ena_intr_moder_entry *pred_moder_entry;
- struct ena_intr_moder_entry *new_moder_entry;
- struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
- unsigned int interval;
-
- /* We apply adaptive moderation on Rx path only.
- * Tx uses static interrupt moderation.
- */
- if (!pkts || !bytes)
- /* Tx interrupt, or spurious interrupt,
- * in both cases we just use same delay values
- */
- return;
-
- curr_moder_idx = (enum ena_intr_moder_level)(*moder_tbl_idx);
- if (unlikely(curr_moder_idx >= ENA_INTR_MAX_NUM_OF_LEVELS)) {
- ena_trc_err("Wrong moderation index %u\n", curr_moder_idx);
- return;
- }
-
- curr_moder_entry = &intr_moder_tbl[curr_moder_idx];
- new_moder_idx = curr_moder_idx;
-
- if (curr_moder_idx == ENA_INTR_MODER_LOWEST) {
- if ((pkts > curr_moder_entry->pkts_per_interval) ||
- (bytes > curr_moder_entry->bytes_per_interval))
- new_moder_idx =
- (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
- } else {
- pred_moder_entry = &intr_moder_tbl[curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE];
-
- if ((pkts <= pred_moder_entry->pkts_per_interval) ||
- (bytes <= pred_moder_entry->bytes_per_interval))
- new_moder_idx =
- (enum ena_intr_moder_level)(curr_moder_idx - ENA_INTR_MODER_LEVEL_STRIDE);
- else if ((pkts > curr_moder_entry->pkts_per_interval) ||
- (bytes > curr_moder_entry->bytes_per_interval)) {
- if (curr_moder_idx != ENA_INTR_MODER_HIGHEST)
- new_moder_idx =
- (enum ena_intr_moder_level)(curr_moder_idx + ENA_INTR_MODER_LEVEL_STRIDE);
- }
- }
- new_moder_entry = &intr_moder_tbl[new_moder_idx];
-
- interval = new_moder_entry->intr_moder_interval;
- *smoothed_interval = (
- (interval * ENA_INTR_DELAY_NEW_VALUE_WEIGHT +
- ENA_INTR_DELAY_OLD_VALUE_WEIGHT * (*smoothed_interval)) + 5) /
- 10;
-
- *moder_tbl_idx = new_moder_idx;
-}
-
/* ena_com_update_intr_reg - Prepare interrupt register
* @intr_reg: interrupt register to update.
* @rx_delay_interval: Rx interval in usecs
* @tx_delay_interval: Tx interval in usecs
- * @unmask: unask enable/disable
+ * @unmask: unmask enable/disable
*
* Prepare interrupt update register with the supplied parameters.
*/
diff --git a/ena_defs/ena_admin_defs.h b/ena_defs/ena_admin_defs.h
index 58dec5e77a7f..52cdb9e5e394 100644
--- a/ena_defs/ena_admin_defs.h
+++ b/ena_defs/ena_admin_defs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -409,6 +409,10 @@ struct ena_admin_basic_stats {
uint32_t rx_drops_low;
uint32_t rx_drops_high;
+
+ uint32_t tx_drops_low;
+
+ uint32_t tx_drops_high;
};
struct ena_admin_acq_get_stats_resp {
@@ -492,6 +496,36 @@ enum ena_admin_llq_stride_ctrl {
ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY = 2,
};
+enum ena_admin_accel_mode_feat {
+ ENA_ADMIN_DISABLE_META_CACHING = 0,
+ ENA_ADMIN_LIMIT_TX_BURST = 1,
+};
+
+struct ena_admin_accel_mode_get {
+ /* bit field of enum ena_admin_accel_mode_feat */
+ uint16_t supported_flags;
+
+ /* maximum burst size between two doorbells. The size is in bytes */
+ uint16_t max_tx_burst_size;
+};
+
+struct ena_admin_accel_mode_set {
+ /* bit field of enum ena_admin_accel_mode_feat */
+ uint16_t enabled_flags;
+
+ uint16_t reserved;
+};
+
+struct ena_admin_accel_mode_req {
+ union {
+ uint32_t raw[2];
+
+ struct ena_admin_accel_mode_get get;
+
+ struct ena_admin_accel_mode_set set;
+ } u;
+};
+
struct ena_admin_feature_llq_desc {
uint32_t max_llq_num;
@@ -537,10 +571,13 @@ struct ena_admin_feature_llq_desc {
/* the stride control the driver selected to use */
uint16_t descriptors_stride_ctrl_enabled;
- /* Maximum size in bytes taken by llq entries in a single tx burst.
- * Set to 0 when there is no such limit.
+ /* reserved */
+ uint32_t reserved1;
+
+ /* accelerated low latency queues requirement. driver needs to
+ * support those requirements in order to use accelerated llq
*/
- uint32_t max_tx_burst_size;
+ struct ena_admin_accel_mode_req accel_mode;
};
struct ena_admin_queue_ext_feature_fields {
@@ -821,6 +858,14 @@ struct ena_admin_host_info {
uint16_t num_cpus;
uint16_t reserved;
+
+ /* 0 : mutable_rss_table_size
+ * 1 : rx_offset
+ * 2 : interrupt_moderation
+ * 3 : map_rx_buf_bidirectional
+ * 31:4 : reserved
+ */
+ uint32_t driver_supported_features;
};
struct ena_admin_rss_ind_table_entry {
@@ -1033,6 +1078,10 @@ struct ena_admin_aenq_keep_alive_desc {
uint32_t rx_drops_low;
uint32_t rx_drops_high;
+
+ uint32_t tx_drops_low;
+
+ uint32_t tx_drops_high;
};
struct ena_admin_ena_mmio_req_read_less_resp {
@@ -1132,6 +1181,13 @@ struct ena_admin_ena_mmio_req_read_less_resp {
#define ENA_ADMIN_HOST_INFO_DEVICE_MASK GENMASK(7, 3)
#define ENA_ADMIN_HOST_INFO_BUS_SHIFT 8
#define ENA_ADMIN_HOST_INFO_BUS_MASK GENMASK(15, 8)
+#define ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK BIT(0)
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT 1
+#define ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK BIT(1)
+#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT 2
+#define ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK BIT(2)
+#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT 3
+#define ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK BIT(3)
/* feature_rss_ind_table */
#define ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK BIT(0)
@@ -1553,6 +1609,46 @@ static inline void set_ena_admin_host_info_bus(struct ena_admin_host_info *p, ui
p->bdf |= (val << ENA_ADMIN_HOST_INFO_BUS_SHIFT) & ENA_ADMIN_HOST_INFO_BUS_MASK;
}
+static inline uint32_t get_ena_admin_host_info_mutable_rss_table_size(const struct ena_admin_host_info *p)
+{
+ return p->driver_supported_features & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK;
+}
+
+static inline void set_ena_admin_host_info_mutable_rss_table_size(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_supported_features |= val & ENA_ADMIN_HOST_INFO_MUTABLE_RSS_TABLE_SIZE_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_rx_offset(const struct ena_admin_host_info *p)
+{
+ return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK) >> ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_rx_offset(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_RX_OFFSET_SHIFT) & ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_interrupt_moderation(const struct ena_admin_host_info *p)
+{
+ return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK) >> ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_interrupt_moderation(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_SHIFT) & ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK;
+}
+
+static inline uint32_t get_ena_admin_host_info_map_rx_buf_bidirectional(const struct ena_admin_host_info *p)
+{
+ return (p->driver_supported_features & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK) >> ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT;
+}
+
+static inline void set_ena_admin_host_info_map_rx_buf_bidirectional(struct ena_admin_host_info *p, uint32_t val)
+{
+ p->driver_supported_features |= (val << ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_SHIFT) & ENA_ADMIN_HOST_INFO_MAP_RX_BUF_BIDIRECTIONAL_MASK;
+}
+
static inline uint8_t get_ena_admin_feature_rss_ind_table_one_entry_update(const struct ena_admin_feature_rss_ind_table *p)
{
return p->flags & ENA_ADMIN_FEATURE_RSS_IND_TABLE_ONE_ENTRY_UPDATE_MASK;
@@ -1584,4 +1680,4 @@ static inline void set_ena_admin_aenq_link_change_desc_link_status(struct ena_ad
}
#endif /* !defined(DEFS_LINUX_MAINLINE) */
-#endif /*_ENA_ADMIN_H_ */
+#endif /* _ENA_ADMIN_H_ */
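
The new driver_supported_features word in ena_admin_host_info is a plain bit-field, manipulated through the inline helpers added above. A short usage sketch, assuming a hypothetical fill function in a driver that advertises only the Rx-offset and interrupt-moderation capabilities (the include path is illustrative of this tree's layout):

#include "ena_defs/ena_admin_defs.h"

static void fill_driver_supported_features(struct ena_admin_host_info *host_info)
{
	host_info->driver_supported_features = 0;

	/* bit 1: the driver honours the new offset field in the Rx cdesc */
	set_ena_admin_host_info_rx_offset(host_info, 1);
	/* bit 2: the driver understands device-supplied moderation intervals */
	set_ena_admin_host_info_interrupt_moderation(host_info, 1);
	/* bits 0 and 3 (mutable RSS table size, bidirectional Rx buffer
	 * mapping) stay cleared unless the driver actually supports them. */
}
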
diff --git a/ena_defs/ena_common_defs.h b/ena_defs/ena_common_defs.h
index 05435739efd1..88b90d44a79a 100644
--- a/ena_defs/ena_common_defs.h
+++ b/ena_defs/ena_common_defs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,4 +46,4 @@ struct ena_common_mem_addr {
uint16_t reserved16;
};
-#endif /*_ENA_COMMON_H_ */
+#endif /* _ENA_COMMON_H_ */
diff --git a/ena_defs/ena_eth_io_defs.h b/ena_defs/ena_eth_io_defs.h
index 2b5c9b78d19f..14f44d0d9a86 100644
--- a/ena_defs/ena_eth_io_defs.h
+++ b/ena_defs/ena_eth_io_defs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -265,7 +265,9 @@ struct ena_eth_io_rx_cdesc_base {
uint16_t sub_qid;
- uint16_t reserved;
+ uint8_t offset;
+
+ uint8_t reserved;
};
/* 8-word format */
@@ -965,4 +967,4 @@ static inline void set_ena_eth_io_numa_node_cfg_reg_enabled(struct ena_eth_io_nu
}
#endif /* !defined(DEFS_LINUX_MAINLINE) */
-#endif /*_ENA_ETH_IO_H_ */
+#endif /* _ENA_ETH_IO_H_ */
diff --git a/ena_defs/ena_gen_info.h b/ena_defs/ena_gen_info.h
index 5620816a9672..83ed024ae4cc 100644
--- a/ena_defs/ena_gen_info.h
+++ b/ena_defs/ena_gen_info.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -30,5 +30,5 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#define ENA_GEN_DATE "Mon Oct 8 20:25:08 DST 2018"
-#define ENA_GEN_COMMIT "e70f3a6"
+#define ENA_GEN_DATE "Mon Apr 20 15:41:59 DST 2020"
+#define ENA_GEN_COMMIT "daa45ac"
diff --git a/ena_defs/ena_regs_defs.h b/ena_defs/ena_regs_defs.h
index 359cdbd02dbc..53ac662b6189 100644
--- a/ena_defs/ena_regs_defs.h
+++ b/ena_defs/ena_regs_defs.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
+ ENA_REGS_RESET_LAST,
};
/* ena_registers offsets */
@@ -155,4 +156,4 @@ enum ena_regs_reset_reason_types {
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_SHIFT 16
#define ENA_REGS_RSS_IND_ENTRY_UPDATE_CQ_IDX_MASK 0xffff0000
-#endif /*_ENA_REGS_H_ */
+#endif /* _ENA_REGS_H_ */
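
ENA_REGS_RESET_LAST is appended after the existing reset reasons; a reasonable reading (an assumption, not stated in the diff) is that it serves as a sentinel for range checks, along the lines of:

#include <stdbool.h>
#include "ena_defs/ena_regs_defs.h"

/* Hypothetical helper: reject reset reasons this defs file does not know about. */
static bool ena_reset_reason_is_valid(enum ena_regs_reset_reason_types reason)
{
	return reason < ENA_REGS_RESET_LAST;
}
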
diff --git a/ena_eth_com.c b/ena_eth_com.c
index bbed92d22c47..58ddb82246fd 100644
--- a/ena_eth_com.c
+++ b/ena_eth_com.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,7 @@
#include "ena_eth_com.h"
-static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
struct ena_com_io_cq *io_cq)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -60,7 +60,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
return cdesc;
}
-static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
{
u16 tail_masked;
u32 offset;
@@ -72,7 +72,7 @@ static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
}
-static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
u8 *bounce_buffer)
{
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -113,7 +113,7 @@ static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq
return ENA_COM_OK;
}
-static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
u8 *header_src,
u16 header_len)
{
@@ -143,7 +143,7 @@ static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
return 0;
}
-static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
u8 *bounce_buffer;
@@ -163,7 +163,7 @@ static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
return sq_desc;
}
-static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -176,8 +176,10 @@ static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
if (pkt_ctrl->idx) {
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ ena_trc_err("failed to write bounce buffer to device\n");
return rc;
+ }
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
@@ -190,7 +192,7 @@ static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
return ENA_COM_OK;
}
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return get_sq_desc_llq(io_sq);
@@ -198,7 +200,7 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
return get_sq_desc_regular_queue(io_sq);
}
-static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
{
struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
struct ena_com_llq_info *llq_info = &io_sq->llq_info;
@@ -207,13 +209,15 @@ static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
if (!pkt_ctrl->descs_left_in_line) {
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ ena_trc_err("failed to write bounce buffer to device\n");
return rc;
+ }
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
- memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
- 0x0, llq_info->desc_list_entry_size);
+ memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+ 0x0, llq_info->desc_list_entry_size);
pkt_ctrl->idx = 0;
if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
@@ -226,7 +230,7 @@ static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
return ENA_COM_OK;
}
-static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
{
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
return ena_com_sq_update_llq_tail(io_sq);
@@ -240,7 +244,7 @@ static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
return ENA_COM_OK;
}
-static inline struct ena_eth_io_rx_cdesc_base *
+static struct ena_eth_io_rx_cdesc_base *
ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
{
idx &= (io_cq->q_depth - 1);
@@ -249,7 +253,7 @@ static inline struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
u16 *first_cdesc_idx)
{
struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -286,11 +290,10 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
return count;
}
-static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
- struct ena_com_tx_ctx *ena_tx_ctx)
+static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_meta *ena_meta)
{
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
- struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
meta_desc = get_sq_desc(io_sq);
memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
@@ -310,12 +313,13 @@ static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
- meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
meta_desc->len_ctrl |= (io_sq->phase <<
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+ meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
meta_desc->word2 |= ena_meta->l3_hdr_len &
ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
@@ -326,16 +330,37 @@ static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
- meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+ return ena_com_sq_update_tail(io_sq);
+}
- /* Cached the meta desc */
- memcpy(&io_sq->cached_tx_meta, ena_meta,
- sizeof(struct ena_com_tx_meta));
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx,
+ bool *have_meta)
+{
+ struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
- return ena_com_sq_update_tail(io_sq);
+ /* When disable meta caching is set, don't bother to save the meta and
+ * compare it to the stored version, just create the meta
+ */
+ if (io_sq->disable_meta_caching) {
+ if (unlikely(!ena_tx_ctx->meta_valid))
+ return ENA_COM_INVAL;
+
+ *have_meta = true;
+ return ena_com_create_meta(io_sq, ena_meta);
+ } else if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+ *have_meta = true;
+ /* Cache the meta desc */
+ memcpy(&io_sq->cached_tx_meta, ena_meta,
+ sizeof(struct ena_com_tx_meta));
+ return ena_com_create_meta(io_sq, ena_meta);
+ } else {
+ *have_meta = false;
+ return ENA_COM_OK;
+ }
}
-static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
@@ -401,24 +426,26 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
}
if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV
- && !buffer_to_push))
+ && !buffer_to_push)) {
+		ena_trc_err("push header wasn't provided in LLQ mode\n");
return ENA_COM_INVAL;
+ }
rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
return rc;
- have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
- ena_tx_ctx);
- if (have_meta) {
- rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
- if (unlikely(rc))
- return rc;
+ rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
+ if (unlikely(rc)) {
+ ena_trc_err("failed to create and store tx meta desc\n");
+ return rc;
}
/* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
rc = ena_com_close_bounce_buffer(io_sq);
+ if (rc)
+ ena_trc_err("failed to write buffers to LLQ\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
}
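
With meta caching disabled the driver can no longer compare against a stored copy of the TX meta, so a meta descriptor is produced for every packet and ena_com_prepare_tx learns whether one was emitted through the new have_meta out-parameter. A compressed sketch of that decision logic follows; the struct layouts, the memcmp-based change check, and the create_meta() stub are simplified stand-ins for the driver's real types and helpers.

#include <stdbool.h>
#include <string.h>

/* Simplified stand-ins for the driver structures. */
struct tx_meta { int l3_hdr_len, l3_hdr_offset, l4_hdr_len; };
struct tx_ctx  { struct tx_meta meta; bool meta_valid; };
struct io_sq   { bool disable_meta_caching; struct tx_meta cached_meta; };

/* Stub: would build a meta descriptor and advance the SQ tail. */
static int create_meta(struct io_sq *sq, const struct tx_meta *m)
{
	(void)sq; (void)m;
	return 0;
}

/* Decide whether a meta descriptor is written for this packet and report
 * the outcome through *have_meta. */
static int create_and_store_meta(struct io_sq *sq, struct tx_ctx *ctx,
    bool *have_meta)
{
	if (sq->disable_meta_caching) {
		/* No cache to compare against: valid meta must always be supplied. */
		if (!ctx->meta_valid)
			return -1;
		*have_meta = true;
		return create_meta(sq, &ctx->meta);
	}
	if (ctx->meta_valid &&
	    memcmp(&sq->cached_meta, &ctx->meta, sizeof(ctx->meta)) != 0) {
		/* Meta changed: cache it and emit a descriptor. */
		*have_meta = true;
		memcpy(&sq->cached_meta, &ctx->meta, sizeof(ctx->meta));
		return create_meta(sq, &ctx->meta);
	}
	*have_meta = false;	/* unchanged meta, nothing extra to send */
	return 0;
}

int main(void)
{
	struct io_sq sq = { .disable_meta_caching = true };
	struct tx_ctx ctx = { .meta_valid = true };
	bool have_meta;

	return create_and_store_meta(&sq, &ctx, &have_meta);
}
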
@@ -478,8 +505,10 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* The first desc share the same desc as the header */
if (likely(i != 0)) {
rc = ena_com_sq_update_tail(io_sq);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ ena_trc_err("failed to update sq tail\n");
return rc;
+ }
desc = get_sq_desc(io_sq);
if (unlikely(!desc))
@@ -508,10 +537,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
rc = ena_com_sq_update_tail(io_sq);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ ena_trc_err("failed to update sq tail of the last descriptor\n");
return rc;
+ }
rc = ena_com_close_bounce_buffer(io_sq);
+ if (rc)
+ ena_trc_err("failed when closing bounce buffer\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
@@ -525,7 +558,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
u16 cdesc_idx = 0;
u16 nb_hw_desc;
- u16 i;
+ u16 i = 0;
ENA_WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX,
"wrong Q type");
@@ -545,13 +578,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return ENA_COM_NO_SPACE;
}
- for (i = 0; i < nb_hw_desc; i++) {
- cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
+ cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
+ ena_rx_ctx->pkt_offset = cdesc->offset;
+ do {
ena_buf->len = cdesc->length;
ena_buf->req_id = cdesc->req_id;
ena_buf++;
- }
+ } while ((++i < nb_hw_desc) && (cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i)));
/* Update SQ head ptr */
io_sq->next_to_comp += nb_hw_desc;
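
The rewritten RX completion loop reads the packet offset from the first completion descriptor and then walks the remaining descriptors with a do/while, so the first descriptor is fetched exactly once. A rough user-space sketch of that traversal, assuming at least one completed descriptor; the descriptor and buffer types are reduced stand-ins for the driver's structures.

#include <stdint.h>

/* Reduced stand-ins for the completion descriptor and receive buffer. */
struct rx_cdesc { uint16_t length, req_id; uint8_t offset; };
struct rx_buf   { uint16_t len, req_id; };

/* Index into a power-of-two completion ring. */
static struct rx_cdesc *cdesc_at(struct rx_cdesc *ring, uint16_t depth,
    uint16_t idx)
{
	return &ring[idx & (depth - 1)];
}

/* Copy nb_hw_desc completions (assumed >= 1) into the caller's buffer
 * descriptions and return the packet offset carried by the first cdesc. */
static uint8_t collect_rx_pkt(struct rx_cdesc *ring, uint16_t depth,
    uint16_t first_idx, uint16_t nb_hw_desc, struct rx_buf *bufs)
{
	struct rx_cdesc *cdesc = cdesc_at(ring, depth, first_idx);
	uint8_t pkt_offset = cdesc->offset;	/* only the first cdesc carries it */
	uint16_t i = 0;

	do {
		bufs[i].len = cdesc->length;
		bufs[i].req_id = cdesc->req_id;
	} while (++i < nb_hw_desc &&
	    (cdesc = cdesc_at(ring, depth, first_idx + i)));

	return pkt_offset;
}

int main(void)
{
	struct rx_cdesc ring[8] = { { .length = 64, .req_id = 3, .offset = 2 } };
	struct rx_buf bufs[8];

	return collect_rx_pkt(ring, 8, 0, 1, bufs) == 2 ? 0 : 1;
}
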
diff --git a/ena_eth_com.h b/ena_eth_com.h
index ccbbf50b6b2d..4b91221ea093 100644
--- a/ena_eth_com.h
+++ b/ena_eth_com.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,6 +77,7 @@ struct ena_com_rx_ctx {
u32 hash;
u16 descs;
int max_bufs;
+ u8 pkt_offset;
};
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
@@ -99,7 +100,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
ENA_REG_WRITE32(io_cq->bus, intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -117,7 +118,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
int temp;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- return ena_com_free_desc(io_sq) >= required_buffers;
+ return ena_com_free_q_entries(io_sq) >= required_buffers;
/* This calculation doesn't need to be 100% accurate. So to reduce
* the calculation overhead just Subtract 2 lines from the free descs
@@ -126,7 +127,7 @@ static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
*/
temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
- return ena_com_free_desc(io_sq) > temp;
+ return ena_com_free_q_entries(io_sq) > temp;
}
static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
@@ -160,7 +161,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info = &io_sq->llq_info;
num_descs = ena_tx_ctx->num_bufs;
- if (unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+ if (llq_info->disable_meta_caching ||
+ unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
++num_descs;
if (num_descs > llq_info->descs_num_before_header) {
@@ -177,8 +179,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
- u16 tail = io_sq->tail;
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
+ u16 tail = io_sq->tail;
ena_trc_dbg("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
@@ -199,15 +201,17 @@ static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
u16 unreported_comp, head;
bool need_update;
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
- if (io_cq->cq_head_db_reg && need_update) {
- ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
+ if (unlikely(io_cq->cq_head_db_reg)) {
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+
+ if (unlikely(need_update)) {
+ ena_trc_dbg("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ ENA_REG_WRITE32(io_cq->bus, head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
}
return 0;
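
Because disabling meta caching means a meta descriptor accompanies every packet, the LLQ burst accounting in the doorbell check has to count that descriptor unconditionally rather than only when the meta changed. A rough sketch of that accounting; the field names and the rounding of descriptors into LLQ entries are illustrative, not the driver's exact arithmetic.

#include <stdbool.h>
#include <stdint.h>

/* Simplified LLQ burst accounting; field names are illustrative. */
struct llq_state {
	bool     disable_meta_caching;
	bool     meta_changed;		/* stand-in for the meta comparison */
	uint16_t descs_num_before_header;
	uint16_t descs_per_entry;
	uint16_t entries_in_tx_burst_left;
};

/* Should a doorbell be written before queueing a packet with num_bufs
 * data descriptors? */
static bool doorbell_needed(const struct llq_state *llq, uint16_t num_bufs)
{
	uint16_t num_descs = num_bufs;
	uint16_t num_entries_needed = 1;

	/* Without meta caching a meta descriptor is sent with every packet,
	 * so it always counts against the burst budget. */
	if (llq->disable_meta_caching || llq->meta_changed)
		++num_descs;

	if (num_descs > llq->descs_num_before_header)
		num_entries_needed = (num_descs + llq->descs_per_entry - 1) /
		    llq->descs_per_entry;

	return num_entries_needed > llq->entries_in_tx_burst_left;
}

int main(void)
{
	struct llq_state llq = {
		.disable_meta_caching = true,
		.descs_num_before_header = 2,
		.descs_per_entry = 2,
		.entries_in_tx_burst_left = 1,
	};

	return doorbell_needed(&llq, 3) ? 0 : 1;
}
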
diff --git a/ena_plat.h b/ena_plat.h
index ef08ec22d178..e3536cdf3573 100644
--- a/ena_plat.h
+++ b/ena_plat.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2015-2019 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2020 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@ __FBSDID("$FreeBSD$");
#include <machine/in_cksum.h>
#include <machine/pcpu.h>
#include <machine/resource.h>
+#include <machine/_inttypes.h>
#include <net/bpf.h>
#include <net/ethernet.h>
@@ -103,6 +104,7 @@ extern struct ena_bus_space ebs;
#define ENA_RSC (1 << 6) /* Goes with TXPTH or RXPTH, free/alloc res. */
#define ENA_IOQ (1 << 7) /* Detailed info about IO queues. */
#define ENA_ADMQ (1 << 8) /* Detailed info about admin queue. */
+#define ENA_NETMAP (1 << 9) /* Detailed info about netmap. */
extern int ena_log_level;
@@ -123,8 +125,8 @@ extern int ena_log_level;
#define ena_trc_warn(format, arg...) ena_trace(ENA_WARNING, format, ##arg)
#define ena_trc_err(format, arg...) ena_trace(ENA_ALERT, format, ##arg)
-#define unlikely(x) __predict_false(x)
-#define likely(x) __predict_true(x)
+#define unlikely(x) __predict_false(!!(x))
+#define likely(x) __predict_true(!!(x))
#define __iomem
#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
@@ -185,10 +187,11 @@ static inline long PTR_ERR(const void *ptr)
#define ENA_COM_TIMER_EXPIRED ETIMEDOUT
#define ENA_MSLEEP(x) pause_sbt("ena", SBT_1MS * (x), SBT_1MS, 0)
+#define ENA_USLEEP(x) pause_sbt("ena", SBT_1US * (x), SBT_1US, 0)
#define ENA_UDELAY(x) DELAY(x)
#define ENA_GET_SYSTEM_TIMEOUT(timeout_us) \
((long)cputick2usec(cpu_ticks()) + (timeout_us))
-#define ENA_TIME_EXPIRE(timeout) ((timeout) < (long)cputick2usec(cpu_ticks()))
+#define ENA_TIME_EXPIRE(timeout) ((timeout) < cputick2usec(cpu_ticks()))
#define ENA_MIGHT_SLEEP()
#define min_t(type, _x, _y) ((type)(_x) < (type)(_y) ? (type)(_x) : (type)(_y))
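
ENA_USLEEP gives polling paths a microsecond-granularity sleep alongside ENA_MSLEEP and ENA_UDELAY, which makes an exponential backoff between a small lower bound and a larger cap practical instead of a fixed multi-millisecond pause. A user-space sketch of such a backoff; the bounds and the ready() predicate are placeholders, not values taken from the driver.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define MIN_POLL_US	100	/* placeholder lower bound */
#define MAX_POLL_US	5000	/* placeholder cap */

/* Placeholder for "has the device finished the command yet?". */
static bool ready(int attempt)
{
	return attempt >= 6;
}

/* Poll with exponential backoff: start small and double the sleep each
 * round, capping it at MAX_POLL_US. */
static int poll_with_backoff(void)
{
	useconds_t delay = MIN_POLL_US;
	int attempt = 0;

	while (!ready(attempt++)) {
		usleep(delay);
		delay = (delay * 2 > MAX_POLL_US) ? MAX_POLL_US : delay * 2;
	}
	printf("completed after %d polls\n", attempt);
	return 0;
}

int main(void)
{
	return poll_with_backoff();
}
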
@@ -275,11 +278,23 @@ struct ena_bus {
typedef uint32_t ena_atomic32_t;
+#define ENA_PRIu64 PRIu64
+
+typedef uint64_t ena_time_t;
+
void ena_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nseg,
int error);
int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
int mapflags);
+static inline uint32_t
+ena_reg_read32(struct ena_bus *bus, bus_size_t offset)
+{
+ uint32_t v = bus_space_read_4(bus->reg_bar_t, bus->reg_bar_h, offset);
+ rmb();
+ return v;
+}
+
#define ENA_MEMCPY_TO_DEVICE_64(dst, src, size) \
do { \
int count, i; \
@@ -293,7 +308,11 @@ int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
#define ENA_MEM_ALLOC(dmadev, size) malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO)
#define ENA_MEM_ALLOC_NODE(dmadev, size, virt, node, dev_node) (virt = NULL)
-#define ENA_MEM_FREE(dmadev, ptr) free(ptr, M_DEVBUF)
+#define ENA_MEM_FREE(dmadev, ptr, size) \
+ do { \
+ (void)(size); \
+ free(ptr, M_DEVBUF); \
+ } while (0)
#define ENA_MEM_ALLOC_COHERENT_NODE(dmadev, size, virt, phys, handle, node, \
dev_node) \
do { \
@@ -320,18 +339,19 @@ int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
/* Register R/W methods */
#define ENA_REG_WRITE32(bus, value, offset) \
+ do { \
+ wmb(); \
+ ENA_REG_WRITE32_RELAXED(bus, value, offset); \
+ } while (0)
+
+#define ENA_REG_WRITE32_RELAXED(bus, value, offset) \
bus_space_write_4( \
((struct ena_bus*)bus)->reg_bar_t, \
((struct ena_bus*)bus)->reg_bar_h, \
(bus_size_t)(offset), (value))
-#define ENA_REG_WRITE32_RELAXED(bus, value, offset) \
- ENA_REG_WRITE32(bus, value, offset)
#define ENA_REG_READ32(bus, offset) \
- bus_space_read_4( \
- ((struct ena_bus*)bus)->reg_bar_t, \
- ((struct ena_bus*)bus)->reg_bar_h, \
- (bus_size_t)(offset))
+ ena_reg_read32((struct ena_bus*)(bus), (bus_size_t)(offset))
#define ENA_DB_SYNC_WRITE(mem_handle) bus_dmamap_sync( \
(mem_handle)->tag, (mem_handle)->map, BUS_DMASYNC_PREWRITE)
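
ENA_REG_WRITE32 now places a write barrier before the MMIO store, and reads go through ena_reg_read32(), which fences after the load; prior descriptor writes are therefore visible to the device before a doorbell lands, and a status read completes before its value is consumed. A compressed sketch of that ordering pattern; the barriers here are compiler-only stand-ins and the bus accessors write to a fake register window.

#include <stdint.h>

/* Compiler-only stand-ins for the real hardware memory barriers. */
#define wmb()	__asm__ __volatile__("" ::: "memory")
#define rmb()	__asm__ __volatile__("" ::: "memory")

static volatile uint32_t fake_bar[64];	/* pretend MMIO window */

static void raw_write32(uint32_t off, uint32_t val) { fake_bar[off / 4] = val; }
static uint32_t raw_read32(uint32_t off)            { return fake_bar[off / 4]; }

/* Ordered doorbell-style write: earlier stores are fenced before the
 * register write is issued. */
static void reg_write32(uint32_t off, uint32_t val)
{
	wmb();
	raw_write32(off, val);
}

/* Ordered register read: fence before the loaded value is consumed. */
static uint32_t reg_read32(uint32_t off)
{
	uint32_t v = raw_read32(off);

	rmb();
	return v;
}

int main(void)
{
	reg_write32(0x10, 0xabcd);
	return reg_read32(0x10) == 0xabcd ? 0 : 1;
}
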
@@ -383,6 +403,12 @@ int ena_dma_alloc(device_t dmadev, bus_size_t size, ena_mem_handle_t *dma,
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define ENA_FFS(x) ffs(x)
+
+void ena_rss_key_fill(void *key, size_t size);
+
+#define ENA_RSS_FILL_KEY(key, size) ena_rss_key_fill(key, size)
+
#include "ena_defs/ena_includes.h"
#endif /* ENA_PLAT_H_ */
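
ENA_MEM_FREE now carries the allocation size even though FreeBSD's free() ignores it; keeping the size in the shared signature lets platform layers that do sized frees use it without touching common code. A small sketch of that wrapper pattern, assuming a plain malloc/free backend; the macro names are illustrative, not the driver's.

#include <stdio.h>
#include <stdlib.h>

/* Wrapper pattern: the size travels with every free even when the backing
 * allocator (plain free() here) does not need it. */
#define MEM_ALLOC(size)		calloc(1, (size))
#define MEM_FREE(ptr, size)			\
	do {					\
		(void)(size);			\
		free(ptr);			\
	} while (0)

int main(void)
{
	size_t len = 128;
	char *buf = MEM_ALLOC(len);

	if (buf == NULL)
		return 1;
	snprintf(buf, len, "hello");
	printf("%s\n", buf);
	MEM_FREE(buf, len);	/* callers always pass the size */
	return 0;
}
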