diff --git a/src/audio/kpb.c b/src/audio/kpb.c
index a71d9bd7fbc0..b34bc7a95e8a 100644
--- a/src/audio/kpb.c
+++ b/src/audio/kpb.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -56,27 +57,30 @@ struct comp_data {
 	bool is_internal_buffer_full;
 	size_t buffered_data;
 	struct dd draining_task_data;
-	size_t buffer_size;
+	size_t kpb_buffer_size;
+	size_t host_buffer_size;
+	size_t host_period_size;
 };

 /*! KPB private functions */
 static void kpb_event_handler(int message, void *cb_data, void *event_data);
 static int kpb_register_client(struct comp_data *kpb, struct kpb_client *cli);
-static void kpb_init_draining(struct comp_data *kpb, struct kpb_client *cli);
+static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli);
 static uint64_t kpb_draining_task(void *arg);
-static void kpb_buffer_data(struct comp_data *kpb, struct comp_buffer *source,
-			    size_t size);
+static int kpb_buffer_data(struct comp_dev *dev, struct comp_buffer *source,
+			   size_t size);
 static size_t kpb_allocate_history_buffer(struct comp_data *kpb);
 static void kpb_clear_history_buffer(struct hb *buff);
 static void kpb_free_history_buffer(struct hb *buff);
-static bool kpb_has_enough_history_data(struct comp_data *kpb,
-					struct hb *buff, size_t his_req);
 static inline bool kpb_is_sample_width_supported(uint32_t sampling_width);
 static void kpb_copy_samples(struct comp_buffer *sink,
 			     struct comp_buffer *source, size_t size,
 			     size_t sample_width);
 static void kpb_drain_samples(void *source, struct comp_buffer *sink,
 			      size_t size, size_t sample_width);
+static void kpb_reset_history_buffer(struct hb *buff);
+static inline bool validate_host_params(size_t host_period_size,
+					size_t host_buffer_size);

 /**
  * \brief Create a key phrase buffer component.
@@ -90,8 +94,7 @@ static struct comp_dev *kpb_new(struct sof_ipc_comp *comp)
 		(struct sof_ipc_comp_process *)comp;
 	size_t bs = ipc_process->size;
 	struct comp_dev *dev;
-	struct comp_data *cd;
-	size_t allocated_size;
+	struct comp_data *kpb;

 	trace_kpb("kpb_new()");

@@ -109,63 +112,43 @@ static struct comp_dev *kpb_new(struct sof_ipc_comp *comp)
 	assert(!memcpy_s(&dev->comp, sizeof(struct sof_ipc_comp_process),
 			 comp, sizeof(struct sof_ipc_comp_process)));

-	cd = rzalloc(RZONE_RUNTIME, SOF_MEM_CAPS_RAM, sizeof(*cd));
-	if (!cd) {
+	kpb = rzalloc(RZONE_RUNTIME, SOF_MEM_CAPS_RAM, sizeof(*kpb));
+	if (!kpb) {
 		rfree(dev);
 		return NULL;
 	}

-	comp_set_drvdata(dev, cd);
+	comp_set_drvdata(dev, kpb);

-	assert(!memcpy_s(&cd->config, sizeof(cd->config), ipc_process->data,
+	assert(!memcpy_s(&kpb->config, sizeof(kpb->config), ipc_process->data,
 			 bs));

-	if (!kpb_is_sample_width_supported(cd->config.sampling_width)) {
+	if (!kpb_is_sample_width_supported(kpb->config.sampling_width)) {
 		trace_kpb_error("kpb_new() error: "
 				"requested sampling width not supported");
 		return NULL;
 	}

-	/* Sampling width accepted. Lets calculate and store
-	 * its derivatives for quick lookup in runtime.
-	 */
-	cd->buffer_size = KPB_MAX_BUFFER_SIZE(cd->config.sampling_width);
-
-	if (cd->config.no_channels > KPB_MAX_SUPPORTED_CHANNELS) {
+	if (kpb->config.no_channels > KPB_MAX_SUPPORTED_CHANNELS) {
 		trace_kpb_error("kpb_new() error: "
 				"no of channels exceeded the limit");
 		return NULL;
 	}

-	if (cd->config.history_depth > cd->buffer_size) {
-		trace_kpb_error("kpb_new() error: "
-				"history depth exceeded the limit");
-		return NULL;
-	}
-
-	if (cd->config.sampling_freq != KPB_SAMPLNG_FREQUENCY) {
+	if (kpb->config.sampling_freq != KPB_SAMPLNG_FREQUENCY) {
 		trace_kpb_error("kpb_new() error: "
 				"requested sampling frequency not supported");
 		return NULL;
 	}

-	dev->state = COMP_STATE_READY;
-
-	/* Zero number of clients */
-	cd->kpb_no_of_clients = 0;
-	/* Set initial state as buffering */
-	cd->state = KPB_STATE_BUFFERING;
+	/* Init basic component data */
+	kpb->history_buffer = NULL;
+	kpb->kpb_no_of_clients = 0;

-	/* Allocate history buffer */
-	allocated_size = kpb_allocate_history_buffer(cd);
-
-	/* Have we allocated what we requested? */
-	if (allocated_size < cd->buffer_size) {
-		trace_kpb_error("Failed to allocate space for "
-				"KPB buffer/s");
-		return NULL;
-	}
+	/* KPB has been created successfully */
+	dev->state = COMP_STATE_READY;
+	kpb->state = KPB_STATE_CREATED;

 	return dev;
 }
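
The patch replaces the old two-state lifecycle with an explicit state machine: a component is CREATED by kpb_new(), moves through PREPARING to RUN in kpb_prepare(), and uses RESETTING/RESET_FINISHING as transients while a reset is negotiated. A minimal sketch of the transitions implied by the patch; kpb_transition_ok() is a hypothetical helper for illustration, not part of the change:

```c
#include <stdbool.h>
#include <stdio.h>

/* Mirror of enum kpb_state from src/include/sof/audio/kpb.h (below). */
enum kpb_state {
	KPB_STATE_DISABLED = 0,
	KPB_STATE_RESET_FINISHING,
	KPB_STATE_CREATED,
	KPB_STATE_PREPARING,
	KPB_STATE_RUN,
	KPB_STATE_BUFFERING,
	KPB_STATE_DRAINING,
	KPB_STATE_HOST_COPY,
	KPB_STATE_RESETTING,
};

/* Hypothetical transition check; the patch keeps these rules implicit
 * in kpb_new()/kpb_prepare()/kpb_copy()/kpb_reset().
 */
static bool kpb_transition_ok(enum kpb_state from, enum kpb_state to)
{
	switch (to) {
	case KPB_STATE_CREATED:		/* kpb_new() */
		return from == KPB_STATE_DISABLED;
	case KPB_STATE_PREPARING:	/* kpb_prepare(), end of reset */
		return from == KPB_STATE_CREATED ||
		       from == KPB_STATE_RUN ||
		       from == KPB_STATE_HOST_COPY ||
		       from == KPB_STATE_RESET_FINISHING;
	case KPB_STATE_RUN:		/* prepare done, buffering done */
		return from == KPB_STATE_PREPARING ||
		       from == KPB_STATE_BUFFERING;
	case KPB_STATE_BUFFERING:	/* kpb_buffer_data() entry */
		return from == KPB_STATE_RUN || from == KPB_STATE_DRAINING;
	case KPB_STATE_DRAINING:	/* draining task start */
		return from == KPB_STATE_RUN || from == KPB_STATE_BUFFERING;
	case KPB_STATE_HOST_COPY:	/* draining finished */
		return from == KPB_STATE_DRAINING;
	case KPB_STATE_RESETTING:	/* reset requested mid-job */
		return from == KPB_STATE_BUFFERING ||
		       from == KPB_STATE_DRAINING;
	case KPB_STATE_RESET_FINISHING:	/* job acknowledges the reset */
		return from == KPB_STATE_RESETTING;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", kpb_transition_ok(KPB_STATE_RUN, KPB_STATE_DRAINING));
	printf("%d\n", kpb_transition_ok(KPB_STATE_CREATED, KPB_STATE_RUN));
	return 0;
}
```

Keeping the rules in one predicate like this would make the state checks scattered across kpb_prepare(), kpb_copy() and kpb_reset() easier to audit.
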
@@ -181,7 +164,7 @@ static size_t kpb_allocate_history_buffer(struct comp_data *kpb)
 	struct hb *history_buffer;
 	struct hb *new_hb = NULL;
 	/*! Total allocation size */
-	size_t hb_size = kpb->buffer_size;
+	size_t hb_size = kpb->kpb_buffer_size;
 	/*! Current allocation size */
 	size_t ca_size = hb_size;
 	/*! Memory caps priorites for history buffer */
@@ -310,8 +293,12 @@ static void kpb_free(struct comp_dev *dev)

 	trace_kpb("kpb_free()");

+	/* Unregister KPB from async notification */
+	notifier_unregister(&kpb->kpb_events);
+
 	/* Reclaim memory occupied by history buffer */
 	kpb_free_history_buffer(kpb->history_buffer);
+	kpb->history_buffer = NULL;

 	/* Free KPB */
 	rfree(kpb);
@@ -341,14 +328,22 @@ static int kpb_trigger(struct comp_dev *dev, int cmd)
  */
 static int kpb_prepare(struct comp_dev *dev)
 {
-	struct comp_data *cd = comp_get_drvdata(dev);
+	struct comp_data *kpb = comp_get_drvdata(dev);
 	int ret = 0;
 	int i;
 	struct list_item *blist;
 	struct comp_buffer *sink;
+	size_t allocated_size;

 	trace_kpb("kpb_prepare()");

+	if (kpb->state == KPB_STATE_RESETTING ||
+	    kpb->state == KPB_STATE_RESET_FINISHING) {
+		trace_kpb_error("kpb_prepare() error: cannot prepare KPB "
+				"due to ongoing reset.");
+		return -EBUSY;
+	}
+
 	ret = comp_set_state(dev, COMP_TRIGGER_PREPARE);
 	if (ret < 0)
 		return ret;
@@ -357,33 +352,50 @@ static int kpb_prepare(struct comp_dev *dev)
 		return PPL_STATUS_PATH_STOP;

 	/* Init private data */
-	cd->kpb_no_of_clients = 0;
-	cd->buffered_data = 0;
-	cd->state = KPB_STATE_BUFFERING;
-
+	kpb->state = KPB_STATE_PREPARING;
+	kpb->kpb_no_of_clients = 0;
+	kpb->buffered_data = 0;
+	kpb->host_buffer_size = dev->params.buffer.size;
+	kpb->host_period_size = dev->params.host_period_bytes;
+	kpb->config.sampling_width = dev->params.sample_container_bytes * 8;
+	kpb->kpb_buffer_size = KPB_MAX_BUFFER_SIZE(kpb->config.sampling_width);
+
+	if (!kpb->history_buffer) {
+		/* Allocate history buffer */
+		allocated_size = kpb_allocate_history_buffer(kpb);
+
+		/* Have we allocated what we requested? */
+		if (allocated_size < kpb->kpb_buffer_size) {
+			trace_kpb_error("Failed to allocate space for "
					"KPB buffer/s");
+			kpb_free_history_buffer(kpb->history_buffer);
+			kpb->history_buffer = NULL;
+			return -EINVAL;
+		}
+	}
 	/* Init history buffer */
-	kpb_clear_history_buffer(cd->history_buffer);
+	kpb_clear_history_buffer(kpb->history_buffer);

 	/* Initialize clients data */
 	for (i = 0; i < KPB_MAX_NO_OF_CLIENTS; i++) {
-		cd->clients[i].state = KPB_CLIENT_UNREGISTERED;
-		cd->clients[i].r_ptr = NULL;
+		kpb->clients[i].state = KPB_CLIENT_UNREGISTERED;
+		kpb->clients[i].r_ptr = NULL;
 	}

 	/* Initialize KPB events */
-	cd->kpb_events.id = NOTIFIER_ID_KPB_CLIENT_EVT;
-	cd->kpb_events.cb_data = cd;
-	cd->kpb_events.cb = kpb_event_handler;
+	kpb->kpb_events.id = NOTIFIER_ID_KPB_CLIENT_EVT;
+	kpb->kpb_events.cb_data = dev;
+	kpb->kpb_events.cb = kpb_event_handler;

 	/* Register KPB for async notification */
-	notifier_register(&cd->kpb_events);
+	notifier_register(&kpb->kpb_events);

 	/* Initialize draining task */
-	schedule_task_init(&cd->draining_task, /* task structure */
+	schedule_task_init(&kpb->draining_task, /* task structure */
 			   SOF_SCHEDULE_EDF, /* utilize EDF scheduler */
 			   0, /* priority doesn't matter for IDLE tasks */
 			   kpb_draining_task, /* task function */
-			   &cd->draining_task_data, /* task private data */
+			   &kpb->draining_task_data, /* task private data */
 			   0, /* core on which we should run */
 			   0); /* not used flags */

@@ -400,13 +412,15 @@ static int kpb_prepare(struct comp_dev *dev)
 		}
 		if (sink->sink->comp.type == SOF_COMP_SELECTOR) {
 			/* We found proper real time sink */
-			cd->sel_sink = sink;
+			kpb->sel_sink = sink;
 		} else if (sink->sink->comp.type == SOF_COMP_HOST) {
 			/* We found proper host sink */
-			cd->host_sink = sink;
+			kpb->host_sink = sink;
 		}
 	}

+	kpb->state = KPB_STATE_RUN;
+
 	return ret;
 }
@@ -441,20 +455,33 @@ static int kpb_reset(struct comp_dev *dev)

 	trace_kpb("kpb_reset()");

-	/* Reset state to be buffering */
-	kpb->state = KPB_STATE_BUFFERING;
-	/* Reset history buffer */
+	kpb->buffered_data = 0;
 	kpb->is_internal_buffer_full = false;
-	kpb_clear_history_buffer(kpb->history_buffer);
-	/* Reset amount of buffered data */
-	kpb->buffered_data = 0;
+	/* Change KPB state to RESET. If there is any ongoing job it will
+	 * shut itself down gracefully first.
+	 */
+	if (kpb->state == KPB_STATE_BUFFERING ||
+	    kpb->state == KPB_STATE_DRAINING) {
+		/* KPB is performing some task now,
+		 * terminate it gently.
+		 */
+		kpb->state = KPB_STATE_RESETTING;
+		return -EBUSY;
+	}
+
+	if (kpb->history_buffer) {
+		/* Reset history buffer - zero its data, reset pointers
+		 * and states.
+		 */
+		kpb_reset_history_buffer(kpb->history_buffer);
+	}

 	/* Unregister KPB for async notification */
 	notifier_unregister(&kpb->kpb_events);

-	/* Reset KPB state to initial buffering state */
-	kpb->state = KPB_STATE_BUFFERING;
+	/* Finally KPB is ready after reset */
+	kpb->state = KPB_STATE_PREPARING;

 	return comp_set_state(dev, COMP_TRIGGER_RESET);
 }
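
kpb_reset() now refuses to tear the component down while a buffering or draining job is still running: it flips the state to RESETTING and returns -EBUSY, and the job itself re-invokes the reset once it notices the flag (see kpb_buffer_data() and kpb_draining_task() below). A stripped-down model of that handshake, with hypothetical names; only the control flow mirrors the patch:

```c
#include <errno.h>
#include <stdio.h>

enum state { RUN, BUFFERING, RESETTING, RESET_FINISHING, PREPARING };

static enum state state = BUFFERING;

/* Mirrors kpb_reset(): defer while a job runs, else reset for real. */
static int do_reset(void)
{
	if (state == BUFFERING) {
		state = RESETTING;	/* ask the running job to stop */
		return -EBUSY;		/* caller retries later */
	}
	state = PREPARING;		/* no job running: reset now */
	return 0;
}

/* Mirrors the check inside kpb_buffer_data()'s copy loop. */
static void job_step(void)
{
	if (state == RESETTING) {
		state = RESET_FINISHING;
		do_reset();		/* the job completes the reset */
		return;
	}
	/* ... normal buffering work would happen here ... */
}

int main(void)
{
	printf("first reset: %d (deferred)\n", do_reset());
	job_step();
	printf("state is PREPARING: %d\n", state == PREPARING);
	return 0;
}
```
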
@@ -474,51 +501,109 @@ static int kpb_copy(struct comp_dev *dev)
 {
 	int ret = 0;
 	struct comp_data *kpb = comp_get_drvdata(dev);
-	struct comp_buffer *source;
-	struct comp_buffer *sink;
+	struct comp_buffer *source = NULL;
+	struct comp_buffer *sink = NULL;
 	size_t copy_bytes = 0;
+	size_t sample_width = kpb->config.sampling_width;

 	tracev_kpb("kpb_copy()");

 	/* Get source and sink buffers */
 	source = list_first_item(&dev->bsource_list, struct comp_buffer,
 				 sink_list);
-	sink = (kpb->state == KPB_STATE_BUFFERING) ? kpb->sel_sink
-						   : kpb->host_sink;

-	/* Stop copying downstream if in draining mode */
-	if (kpb->state == KPB_STATE_DRAINING) {
-		comp_update_buffer_consume(source, source->avail);
-		return PPL_STATUS_PATH_STOP;
+	/* Validate source */
+	if (!source || !source->r_ptr) {
+		trace_kpb_error("kpb_copy(): invalid source pointers.");
+		ret = -EINVAL;
+		goto out;
 	}

-	/* Process source data */
-	/* Check if there are valid pointers */
-	if (!source || !sink)
-		return -EIO;
-	if (!source->r_ptr || !sink->w_ptr)
-		return -EINVAL;
+	switch (kpb->state) {
+	case KPB_STATE_RUN:
+		/* In normal RUN state we simply copy to our sink. */
+		sink = kpb->sel_sink;

-	/* Sink and source are both ready and have space */
-	copy_bytes = MIN(sink->free, source->avail);
-	kpb_copy_samples(sink, source, copy_bytes,
-			 kpb->config.sampling_width);
+		/* Validate sink */
+		if (!sink || !sink->w_ptr) {
+			trace_kpb_error("kpb_copy(): invalid selector "
					"sink pointers.");
+			ret = -EINVAL;
+			goto out;
+		}

-	/* Buffer source data internally in history buffer for future
-	 * use by clients.
-	 */
-	if (source->avail <= kpb->buffer_size) {
-		kpb_buffer_data(kpb, source, copy_bytes);
+		copy_bytes = MIN(sink->free, source->avail);
+		kpb_copy_samples(sink, source, copy_bytes, sample_width);

-		if (kpb->buffered_data < kpb->buffer_size)
-			kpb->buffered_data += copy_bytes;
-		else
-			kpb->is_internal_buffer_full = true;
-	}
+		/* Buffer source data internally in history buffer for future
+		 * use by clients.
+		 */
+		if (source->avail <= kpb->kpb_buffer_size) {
+			ret = kpb_buffer_data(dev, source, copy_bytes);
+			if (ret) {
+				trace_kpb_error("kpb_copy(): internal "
						"buffering failed.");
+				goto out;
+			}
+			if (kpb->buffered_data < kpb->kpb_buffer_size)
+				kpb->buffered_data += copy_bytes;
+			else
+				kpb->is_internal_buffer_full = true;
+		} else {
+			trace_kpb_error("kpb_copy(): too much data to buffer.");
+		}

-	comp_update_buffer_produce(sink, copy_bytes);
-	comp_update_buffer_consume(source, copy_bytes);
+		comp_update_buffer_produce(sink, copy_bytes);
+		comp_update_buffer_consume(source, copy_bytes);
+		ret = 0;
+		break;
+	case KPB_STATE_HOST_COPY:
+		/* In host copy state we only copy to the host buffer. */
+		sink = kpb->host_sink;
+
+		/* Validate sink */
+		if (!sink || !sink->w_ptr) {
+			trace_kpb_error("kpb_copy(): invalid host "
					"sink pointers.");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		copy_bytes = MIN(sink->free, source->avail);
+		kpb_copy_samples(sink, source, copy_bytes, sample_width);
+
+		comp_update_buffer_produce(sink, copy_bytes);
+		comp_update_buffer_consume(source, copy_bytes);
+
+		ret = 0;
+		break;
+	case KPB_STATE_DRAINING:
+		/* In draining state we only buffer data to the internal
+		 * history buffer.
+		 */
+		if (source->avail <= kpb->kpb_buffer_size) {
+			ret = kpb_buffer_data(dev, source, source->avail);
+			if (ret) {
+				trace_kpb_error("kpb_copy(): internal "
						"buffering failed.");
+				goto out;
+			}
+
+			comp_update_buffer_consume(source, source->avail);
+		} else {
+			trace_kpb_error("kpb_copy(): too much data to buffer.");
+		}
+
+		ret = PPL_STATUS_PATH_STOP;
+		break;
+	default:
+		trace_kpb_error("kpb_copy(): wrong state! Copy forbidden.");
+		ret = -EIO;
+		break;
+	}
+
+out:
 	return ret;
 }
@@ -530,18 +615,47 @@ static int kpb_copy(struct comp_dev *dev)
  * \param[in] source pointer to the buffer source.
  *
  */
-static void kpb_buffer_data(struct comp_data *kpb, struct comp_buffer *source,
-			    size_t size)
+static int kpb_buffer_data(struct comp_dev *dev, struct comp_buffer *source,
+			   size_t size)
 {
+	int ret = 0;
 	size_t size_to_copy = size;
 	size_t space_avail;
+	struct comp_data *kpb = comp_get_drvdata(dev);
 	struct hb *buff = kpb->history_buffer;
 	void *read_ptr = source->r_ptr;
+	size_t timeout = platform_timer_get(platform_timer) +
+			 clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1);
+	enum kpb_state state_preserved = kpb->state;
+	struct dd *draining_data = &kpb->draining_task_data;

 	tracev_kpb("kpb_buffer_data()");

+	if (kpb->state != KPB_STATE_RUN && kpb->state != KPB_STATE_DRAINING)
+		return PPL_STATUS_PATH_STOP;
+
+	kpb->state = KPB_STATE_BUFFERING;
+
+	if (state_preserved == KPB_STATE_DRAINING)
+		draining_data->buffered_while_draining += size_to_copy;
+
 	/* Let's store audio stream data in internal history buffer */
 	while (size_to_copy) {
+		/* Reset was requested, it's time to stop buffering and finish
+		 * KPB reset.
+		 */
+		if (kpb->state == KPB_STATE_RESETTING) {
+			kpb->state = KPB_STATE_RESET_FINISHING;
+			kpb_reset(dev);
+			return PPL_STATUS_PATH_STOP;
+		}
+
+		/* Are we stuck in buffering? */
+		if (timeout < platform_timer_get(platform_timer)) {
+			trace_kpb_error("kpb_buffer_data(): timeout.");
+			return -ETIME;
+		}
+
 		/* Check how much space there is in current write buffer */
 		space_avail = (uint32_t)buff->end_addr - (uint32_t)buff->w_ptr;

@@ -596,6 +710,9 @@ static void kpb_buffer_data(struct comp_data *kpb, struct comp_buffer *source,
 			buff->state = KPB_BUFFER_FREE;
 		}
 	}
+
+	kpb->state = state_preserved;
+	return ret;
 }

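
kpb_buffer_data() now bounds its copy loop with a deadline of one millisecond taken from the platform timer, so a stalled history buffer cannot wedge the pipeline. The same guard pattern in self-contained form; timer_get() and TICKS_PER_MS stand in for platform_timer_get() and clock_ms_to_ticks(), and the values are illustrative:

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the platform timer; the tick rate is an assumption. */
static uint64_t fake_ticks;
static uint64_t timer_get(void) { return fake_ticks; }
#define TICKS_PER_MS 19200u

static int buffer_data(uint64_t chunks_to_copy)
{
	/* Deadline computed once, before the loop. */
	uint64_t timeout = timer_get() + 1 * TICKS_PER_MS;

	while (chunks_to_copy) {
		/* Are we stuck in buffering? */
		if (timer_get() > timeout)
			return -ETIME;

		/* ... copy one chunk into the history buffer ... */
		chunks_to_copy--;
		fake_ticks += 10;	/* simulated time per chunk */
	}
	return 0;
}

int main(void)
{
	printf("small copy: %d\n", buffer_data(16));	  /* 0      */
	printf("huge copy:  %d\n", buffer_data(1 << 24)); /* -ETIME */
	return 0;
}
```
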
 /**
@@ -608,7 +725,8 @@ static void kpb_buffer_data(struct comp_data *kpb, struct comp_buffer *source,
 static void kpb_event_handler(int message, void *cb_data, void *event_data)
 {
 	(void)message;
-	struct comp_data *kpb = (struct comp_data *)cb_data;
+	struct comp_dev *dev = (struct comp_dev *)cb_data;
+	struct comp_data *kpb = comp_get_drvdata(dev);
 	struct kpb_event_data *evd = (struct kpb_event_data *)event_data;
 	struct kpb_client *cli = (struct kpb_client *)evd->client_data;

@@ -623,7 +741,7 @@ static void kpb_event_handler(int message, void *cb_data, void *event_data)
 		/*TODO*/
 		break;
 	case KPB_EVENT_BEGIN_DRAINING:
-		kpb_init_draining(kpb, cli);
+		kpb_init_draining(dev, cli);
 		break;
 	case KPB_EVENT_STOP_DRAINING:
 		/*TODO*/
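
The handler now receives the comp_dev through cb_data (it used to get comp_data directly), so it can hand the device pointer on to kpb_init_draining(). A compact model of the dispatch path a detector would trigger; the struct shapes mirror kpb.h, but the event value and the direct call are illustrative stand-ins for the SOF notifier:

```c
#include <stdio.h>

/* Shapes mirrored from kpb.h, simplified for illustration. */
struct kpb_client { int id; int history_depth; /* ms */ };
struct kpb_event_data { int event_id; struct kpb_client *client_data; };

#define KPB_EVENT_BEGIN_DRAINING 2 /* illustrative value */

/* cb_data now carries the device; here it is an opaque pointer. */
static void kpb_event_handler(int message, void *cb_data, void *event_data)
{
	struct kpb_event_data *evd = event_data;

	(void)message;
	(void)cb_data;
	if (evd->event_id == KPB_EVENT_BEGIN_DRAINING)
		printf("begin draining %d ms for client %d\n",
		       evd->client_data->history_depth,
		       evd->client_data->id);
}

int main(void)
{
	struct kpb_client cli = { .id = 0, .history_depth = 2000 };
	struct kpb_event_data evd = { KPB_EVENT_BEGIN_DRAINING, &cli };

	/* A detector would publish this through the SOF notifier with
	 * id NOTIFIER_ID_KPB_CLIENT_EVT; here we call the handler
	 * directly.
	 */
	kpb_event_handler(0, NULL, &evd);
	return 0;
}
```
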
  *
  */
-static void kpb_init_draining(struct comp_data *kpb, struct kpb_client *cli)
+static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli)
 {
+	struct comp_data *kpb = comp_get_drvdata(dev);
 	bool is_sink_ready = (kpb->host_sink->sink->state == COMP_STATE_ACTIVE);
 	size_t sample_width = kpb->config.sampling_width;
 	size_t history_depth = cli->history_depth * kpb->config.no_channels *
 			       (kpb->config.sampling_freq / 1000) *
-			       (sample_width / 8);
+			       (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8);
 	struct hb *buff = kpb->history_buffer;
 	struct hb *first_buff = buff;
 	size_t buffered = 0;
 	size_t local_buffered = 0;
-	enum comp_copy_type copy_type = COMP_COPY_BLOCKING;
-
-	trace_kpb("kpb_init_draining()");
-
-	if (cli->id > KPB_MAX_NO_OF_CLIENTS) {
+	enum comp_copy_type copy_type = COMP_COPY_NORMAL;
+	size_t drain_interval = 0;
+	size_t host_period_size = kpb->host_period_size;
+	size_t host_buffer_size = kpb->host_buffer_size;
+	size_t ticks_per_ms = clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1);
+	size_t bytes_per_ms = KPB_SAMPLING_WIDTH *
+			      (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) *
+			      kpb->config.no_channels;
+	size_t period_bytes_limit = 0;
+
+	trace_kpb("kpb_init_draining(): requested draining of %d [ms] from "
+		  "history buffer", cli->history_depth);
+
+	if (kpb->state != KPB_STATE_RUN) {
+		trace_kpb_error("kpb_init_draining() error: "
+				"wrong KPB state");
+	} else if (cli->id > KPB_MAX_NO_OF_CLIENTS) {
 		trace_kpb_error("kpb_init_draining() error: "
 				"wrong client id");
 	/* TODO: check also if client is registered */
 	} else if (!is_sink_ready) {
 		trace_kpb_error("kpb_init_draining() error: "
 				"sink not ready for draining");
-	} else if (!kpb_has_enough_history_data(kpb, buff, history_depth)) {
+	} else if (kpb->buffered_data < history_depth ||
+		   kpb->kpb_buffer_size < history_depth) {
 		trace_kpb_error("kpb_init_draining() error: "
 				"not enough data in history buffer");
+	} else if (!validate_host_params(host_period_size,
+					 host_buffer_size)) {
+		trace_kpb_error("kpb_init_draining() error: "
+				"wrong host params.");
 	} else {
 		/* Draining accepted, find proper buffer to start reading
 		 * At this point we are guaranteed that there is enough data
@@ -770,6 +906,17 @@ static void kpb_init_draining(struct comp_data *kpb, struct kpb_client *cli)

 		} while (buff != first_buff);

+		/* Calculate the time, in clock ticks, between consecutive
+		 * draining events. This interval will be used to synchronize
+		 * us with the end application's interrupts.
+		 */
+		drain_interval = (host_period_size / bytes_per_ms) *
+				 ticks_per_ms;
+		/* In each draining interval we fill only half of the host
+		 * buffer. This way we are safe from overflowing it.
+		 */
+		period_bytes_limit = host_buffer_size / 2;
+
 		trace_kpb("kpb_init_draining(), schedule draining task");

 		/* Add one-time draining task into the scheduler. */
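
To make the interval math above concrete, here is the same arithmetic with assumed numbers: 16 samples per ms (16 kHz), 4-byte containers, one channel, and a hypothetical 2048-byte host period with an 8192-byte host buffer. Under those assumptions one drain burst may carry up to 4096 bytes and bursts are spaced 32 ms worth of ticks apart:

```c
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* Assumed stream parameters (illustrative, not from the patch). */
	const size_t samples_per_ms = 16;     /* 16 kHz                  */
	const size_t container_bytes = 4;     /* 32-bit sample container */
	const size_t channels = 1;
	const size_t ticks_per_ms = 19200;    /* assumed platform clock  */

	/* Hypothetical host parameters captured at prepare time. */
	const size_t host_period_size = 2048; /* bytes per host period   */
	const size_t host_buffer_size = 8192; /* bytes in host buffer    */

	size_t bytes_per_ms = samples_per_ms * container_bytes * channels;
	size_t drain_interval = (host_period_size / bytes_per_ms) *
				ticks_per_ms;
	size_t period_bytes_limit = host_buffer_size / 2;

	/* Prints: bytes/ms 64, interval 614400 ticks (32 ms), limit 4096 */
	printf("bytes/ms %zu, interval %zu ticks (%zu ms), limit %zu\n",
	       bytes_per_ms, drain_interval, drain_interval / ticks_per_ms,
	       period_bytes_limit);
	return 0;
}
```
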
@@ -778,14 +925,20 @@ static void kpb_init_draining(struct comp_data *kpb, struct kpb_client *cli)
 		kpb->draining_task_data.history_depth = history_depth;
 		kpb->draining_task_data.state = &kpb->state;
 		kpb->draining_task_data.sample_width = sample_width;
-
-		/* Change KPB internal state to DRAINING */
-		kpb->state = KPB_STATE_DRAINING;
+		kpb->draining_task_data.drain_interval = drain_interval;
+		kpb->draining_task_data.pb_limit = period_bytes_limit;
+		kpb->draining_task_data.dev = dev;

-		/* Set host-sink copy mode to blocking */
+		/* Set host-sink copy mode to normal */
 		comp_set_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE,
 				   &copy_type);

+		/* Pause selector copy. */
+		kpb->sel_sink->sink->state = COMP_STATE_PAUSED;
+
+		/* Disable system agent during draining */
+		sa_disable();
+
 		/* Schedule draining task */
 		schedule_task(&kpb->draining_task, 0, 0, SOF_SCHEDULE_FLAG_IDLE);

@@ -811,14 +964,43 @@ static uint64_t kpb_draining_task(void *arg)
 	size_t size_to_copy;
 	bool move_buffer = false;
 	uint32_t drained = 0;
-	uint64_t time;
+	uint64_t draining_time_start = 0;
+	uint64_t draining_time_end = 0;
 	enum comp_copy_type copy_type = COMP_COPY_NORMAL;
+	uint64_t drain_interval = draining_data->drain_interval;
+	uint64_t next_copy_time = 0;
+	uint64_t current_time = 0;
+	size_t period_bytes = 0;
+	size_t period_bytes_limit = draining_data->pb_limit;
+	size_t period_copy_start = platform_timer_get(platform_timer);
+	size_t time_taken = 0;
+	size_t *rt_stream_update = &draining_data->buffered_while_draining;

 	trace_kpb("kpb_draining_task(), start.");

-	time = platform_timer_get(platform_timer);
+	/* Change KPB internal state to DRAINING */
+	*draining_data->state = KPB_STATE_DRAINING;
+
+	draining_time_start = platform_timer_get(platform_timer);

 	while (history_depth > 0) {
+		/* Have we received a reset request? */
+		if (*draining_data->state == KPB_STATE_RESETTING) {
+			*draining_data->state = KPB_STATE_RESET_FINISHING;
+			kpb_reset(draining_data->dev);
+			goto out;
+		}
+
+		/* Are we ready to drain further, or does the host still need
+		 * some time to read the data already provided?
+		 */
+		if (next_copy_time > platform_timer_get(platform_timer)) {
+			period_bytes = 0;
+			period_copy_start = platform_timer_get(platform_timer);
+			continue;
+		} else if (next_copy_time == 0) {
+			period_copy_start = platform_timer_get(platform_timer);
+		}
+
 		size_to_read = (uint32_t)buff->end_addr - (uint32_t)buff->r_ptr;

 		if (size_to_read > sink->free) {
@@ -827,7 +1009,7 @@ static uint64_t kpb_draining_task(void *arg)
 			else
 				size_to_copy = sink->free;
 		} else {
-			if (size_to_read >= history_depth) {
+			if (size_to_read > history_depth) {
 				size_to_copy = history_depth;
 			} else {
 				size_to_copy = size_to_read;
@@ -841,29 +1023,56 @@ static uint64_t kpb_draining_task(void *arg)
 		buff->r_ptr += (uint32_t)size_to_copy;
 		history_depth -= size_to_copy;
 		drained += size_to_copy;
+		period_bytes += size_to_copy;

 		if (move_buffer) {
 			buff->r_ptr = buff->start_addr;
 			buff = buff->next;
 			move_buffer = false;
 		}
+
 		if (size_to_copy)
 			comp_update_buffer_produce(sink, size_to_copy);
-	}

-	time = platform_timer_get(platform_timer) - time;
+		if (period_bytes >= period_bytes_limit) {
+			current_time = platform_timer_get(platform_timer);
+			time_taken = current_time - period_copy_start;
+			next_copy_time = current_time + drain_interval -
+					 time_taken;
+		}
+
+		if (history_depth == 0) {
+			/* We have finished draining the requested data.
+			 * However, while we were draining, the real time
+			 * stream could have provided new data which needs
+			 * to be copied to the host.
+			 */
+			trace_kpb("kpb: update history_depth by %d",
+				  *rt_stream_update);
+			history_depth += *rt_stream_update;
+			*rt_stream_update = 0;
+		}
+	}
+out:
+	draining_time_end = platform_timer_get(platform_timer);

 	/* Draining is done. Now switch KPB to copy real time stream
 	 * to client's sink. This state is called "draining on demand"
+	 * Note! If the KPB state changed during draining, e.g. due to a
+	 * reset request, we should not change that state.
 	 */
-	*draining_data->state = KPB_STATE_HOST_COPY;
+	*draining_data->state = (*draining_data->state == KPB_STATE_DRAINING) ?
+				KPB_STATE_HOST_COPY : *draining_data->state;

 	/* Reset host-sink copy mode back to unblocking */
 	comp_set_attribute(sink->sink, COMP_ATTR_COPY_TYPE, &copy_type);

-	trace_kpb("kpb_draining_task(), done. %u drained in %d ms.",
-		  drained,
-		  time / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1));
+	trace_kpb("KPB: kpb_draining_task(), done. %u drained in %d ms",
+		  drained,
+		  (draining_time_end - draining_time_start)
+		  / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1));
+
+	/* Re-enable system agent */
+	sa_enable();

 	return 0;
 }
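
The draining loop above is self-pacing: it drains at most pb_limit bytes per burst, derives the next start time from the measured copy duration, and spins until that deadline while kpb_copy() keeps accumulating buffered_while_draining. A reduced model of just that control flow, with a simulated clock in place of platform_timer_get() (all constants assumed):

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t now; /* stand-in for platform_timer_get() */

int main(void)
{
	uint64_t drain_interval = 1000;	/* ticks per burst, assumed */
	uint64_t next_copy_time = 0;
	uint64_t period_copy_start = now;
	size_t period_bytes_limit = 4096;
	size_t period_bytes = 0;
	size_t history_depth = 3 * period_bytes_limit; /* bytes to drain */
	size_t bursts = 0;

	while (history_depth > 0) {
		if (next_copy_time > now) {
			/* Host still consumes the last burst: wait. */
			period_bytes = 0;
			period_copy_start = now;
			now += 10;	/* time passes while spinning */
			continue;
		}

		/* Drain one chunk (here: a whole burst at once). */
		size_t chunk = period_bytes_limit;
		history_depth -= chunk;
		period_bytes += chunk;
		now += 100;		/* copying takes time */
		bursts++;

		/* Schedule the next burst, crediting the copy time. */
		if (period_bytes >= period_bytes_limit)
			next_copy_time = now + drain_interval -
					 (now - period_copy_start);
	}
	printf("drained in %zu bursts, finished at t=%llu\n",
	       bursts, (unsigned long long)now);
	return 0;
}
```
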
@@ -881,18 +1090,28 @@ static uint64_t kpb_draining_task(void *arg)
 static void kpb_drain_samples(void *source, struct comp_buffer *sink,
 			      size_t size, size_t sample_width)
 {
-	int16_t *dest;
-	int16_t *src = (int16_t *)source;
-	uint32_t i;
-	uint32_t j = 0;
-	uint32_t channel;
-	uint32_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);
+	void *dst;
+	void *src = source;
+	size_t i;
+	size_t j = 0;
+	size_t channel;
+	size_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);

 	for (i = 0; i < frames; i++) {
 		for (channel = 0; channel < KPB_NR_OF_CHANNELS; channel++) {
-			dest = buffer_write_frag_s16(sink, j);
-			*dest = *src;
-			src++;
+			if (sample_width == 16) {
+				dst = buffer_write_frag_s16(sink, j);
+				*((int16_t *)dst) = *((int16_t *)src);
+				src = ((int16_t *)src) + 1;
+			} else if (sample_width == 32 || sample_width == 24) {
+				dst = buffer_write_frag_s32(sink, j);
+				*((int32_t *)dst) = *((int32_t *)src);
+				src = ((int32_t *)src) + 1;
+			} else {
+				trace_kpb_error("KPB: attempt to copy an "
						"unsupported format!");
+				return;
+			}
 			j++;
 		}
 	}
@@ -922,54 +1141,6 @@ static void kpb_clear_history_buffer(struct hb *buff)
 	} while (buff != first_buff);
 }

-/**
- * \brief Verify if KPB has enough data buffered.
- *
- * \param[in] kpb - KPB component data pointer.
- * \param[in] buff - pointer to current history buffer.
- * \param[in] his_req - requested draining size.
- *
- * \return 1 if there is enough data in history buffer
- *	   and 0 otherwise.
- */
-static bool kpb_has_enough_history_data(struct comp_data *kpb,
-					struct hb *buff, size_t his_req)
-{
-	size_t buffered_data = 0;
-	struct hb *first_buff = buff;
-
-	/* Quick check if we've already filled internal buffer */
-	if (kpb->is_internal_buffer_full)
-		return his_req <= kpb->buffer_size;
-
-	/* Internal buffer isn't full yet. Verify if what already buffered
-	 * is sufficient for draining request.
-	 */
-	while (buffered_data < his_req) {
-		if (buff->state == KPB_BUFFER_FREE) {
-			if (buff->w_ptr == buff->start_addr &&
-			    buff->next->state == KPB_BUFFER_FULL) {
-				buffered_data += ((uint32_t)buff->end_addr -
-						  (uint32_t)buff->start_addr);
-			} else {
-				buffered_data += ((uint32_t)buff->w_ptr -
-						  (uint32_t)buff->start_addr);
-			}
-
-		} else {
-			buffered_data += ((uint32_t)buff->end_addr -
-					  (uint32_t)buff->start_addr);
-		}
-
-		if (buff->next && buff->next != first_buff)
-			buff = buff->next;
-		else
-			break;
-	}
-
-	return buffered_data >= his_req;
-}
-
 static inline bool kpb_is_sample_width_supported(uint32_t sampling_width)
 {
 	bool ret;
@@ -1002,23 +1173,81 @@ static void kpb_copy_samples(struct comp_buffer *sink,
 			     struct comp_buffer *source, size_t size,
 			     size_t sample_width)
 {
-	int16_t *src;
-	int16_t *dest;
-	uint32_t i;
-	uint32_t j = 0;
-	uint32_t channel;
-	uint32_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);
+	void *dst;
+	void *src;
+	size_t i;
+	size_t j = 0;
+	size_t channel;
+	size_t frames = KPB_BYTES_TO_FRAMES(size, sample_width);

 	for (i = 0; i < frames; i++) {
 		for (channel = 0; channel < KPB_NR_OF_CHANNELS; channel++) {
-			src = buffer_read_frag_s16(source, j);
-			dest = buffer_write_frag_s16(sink, j);
-			*dest = *src;
+			if (sample_width == 16) {
+				dst = buffer_write_frag_s16(sink, j);
+				src = buffer_read_frag_s16(source, j);
+				*((int16_t *)dst) = *((int16_t *)src);
+			} else if (sample_width == 32 || sample_width == 24) {
+				dst = buffer_write_frag_s32(sink, j);
+				src = buffer_read_frag_s32(source, j);
+				*((int32_t *)dst) = *((int32_t *)src);
+			} else {
+				trace_kpb_error("KPB: attempt to copy an "
						"unsupported format!");
+				return;
+			}
 			j++;
 		}
 	}
 }

+/**
+ * \brief Reset history buffer.
+ * \param[in] buff - pointer to current history buffer.
+ *
+ * \return none.
+ */
+static void kpb_reset_history_buffer(struct hb *buff)
+{
+	struct hb *first_buff = buff;
+
+	trace_kpb("kpb_reset_history_buffer()");
+
+	if (!buff)
+		return;
+
+	kpb_clear_history_buffer(buff);
+
+	do {
+		buff->w_ptr = buff->start_addr;
+		buff->r_ptr = buff->start_addr;
+		buff->state = KPB_BUFFER_FREE;
+
+		buff = buff->next;
+
+	} while (buff != first_buff);
+}
+
+static inline bool validate_host_params(size_t host_period_size,
+					size_t host_buffer_size)
+{
+	size_t drained_per_interval;
+
+	if (host_period_size == 0 || host_buffer_size == 0)
+		return false;
+
+	drained_per_interval = host_buffer_size / 2;
+
+	/* Check host period size sanity.
+	 * Here we check if the host period size (which defines the
+	 * interval time) will allow us to drain more data than the
+	 * interval takes, as only such a condition guarantees that
+	 * draining will end. The formula:
+	 * drained_data_in_one_interval_ms > interval_break_ms
+	 */
+	return drained_per_interval > host_period_size;
+}

 struct comp_driver comp_kpb = {
 	.type = SOF_COMP_KPB,
 	.ops = {
diff --git a/src/include/sof/audio/kpb.h b/src/include/sof/audio/kpb.h
index acdbba18e3ed..9ca9c6df65e2 100644
--- a/src/include/sof/audio/kpb.h
+++ b/src/include/sof/audio/kpb.h
@@ -38,9 +38,15 @@ struct comp_buffer;
 			    KPB_NR_OF_CHANNELS))

 enum kpb_state {
-	KPB_STATE_BUFFERING = 0,
+	KPB_STATE_DISABLED = 0,
+	KPB_STATE_RESET_FINISHING,
+	KPB_STATE_CREATED,
+	KPB_STATE_PREPARING,
+	KPB_STATE_RUN,
+	KPB_STATE_BUFFERING,
 	KPB_STATE_DRAINING,
 	KPB_STATE_HOST_COPY,
+	KPB_STATE_RESETTING,
 };

 enum kpb_event {
@@ -99,6 +105,10 @@ struct dd {
 	uint8_t is_draining_active;
 	enum kpb_state *state;
 	size_t sample_width;
+	size_t buffered_while_draining;
+	size_t drain_interval;
+	size_t pb_limit; /**< Period bytes limit */
+	struct comp_dev *dev;
 };

 #ifdef UNIT_TEST
diff --git a/test/cmocka/src/audio/kpb/CMakeLists.txt b/test/cmocka/src/audio/kpb/CMakeLists.txt
index bfcc390f1406..f10df4bccb54 100644
--- a/test/cmocka/src/audio/kpb/CMakeLists.txt
+++ b/test/cmocka/src/audio/kpb/CMakeLists.txt
@@ -5,6 +5,7 @@ cmocka_test(kpb
 	kpb_buffer.c
 	kpb_mock.c
 	${PROJECT_SOURCE_DIR}/src/audio/buffer.c
+	${PROJECT_SOURCE_DIR}/src/lib/agent.c
 	#${PROJECT_SOURCE_DIR}/src/audio/component.c
 )
 target_link_libraries(kpb PRIVATE -lm)
diff --git a/test/cmocka/src/audio/kpb/kpb_buffer.c b/test/cmocka/src/audio/kpb/kpb_buffer.c
index c2ec09ae6b39..c5526594ae92 100644
--- a/test/cmocka/src/audio/kpb/kpb_buffer.c
+++ b/test/cmocka/src/audio/kpb/kpb_buffer.c
@@ -256,7 +256,7 @@ static void null_test_success(void **state)
 /* Test main function */
 int main(void)
 {
-	struct CMUnitTest tests[2];
+	struct CMUnitTest tests[1];
 	struct test_case internal_buffering = {
 		.period_bytes = KPB_MAX_BUFFER_SIZE(16),
 		.history_buffer_size = KPB_MAX_BUFFER_SIZE(16),
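
Shrinking the test array from two slots to one suggests only the internal-buffering case stays registered. For reference, a minimal cmocka harness of that shape; buffering_test_fn is a placeholder, not the real test from kpb_buffer.c:

```c
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <setjmp.h>
#include <cmocka.h>

static void buffering_test_fn(void **state)
{
	(void)state;
	/* ... feed periods into KPB and verify history buffer content ... */
	assert_true(1);
}

int main(void)
{
	struct CMUnitTest tests[1];

	/* Single remaining test case, matching the tests[1] array above. */
	tests[0] = (struct CMUnitTest)cmocka_unit_test(buffering_test_fn);

	return cmocka_run_group_tests(tests, NULL, NULL);
}
```
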