Mirror of https://github.com/rtic-rs/rtic.git
commit bcd5f72647 (parent a4552920e5)

    book polish

11 changed files with 92 additions and 28 deletions
@@ -17,6 +17,9 @@ The example below shows that `idle` runs after `init`.
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example idle
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/idle.run}}
 ```
 
@@ -41,6 +44,9 @@ The following example shows how to enable sleep by setting the
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example idle-wfi
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/idle-wfi.run}}
 ```
 
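For readers skimming the diff, a minimal sketch of the `idle` pattern these two hunks exercise: an `#[idle]` handler that sleeps between interrupts. This is not part of the commit; it assumes a Cortex-M target, the `cortex-m` crate, and that the function sits inside the `#[rtic::app]` module like the book's examples.

``` rust
// Sketch only: an idle task that puts the core to sleep until the next interrupt.
#[idle]
fn idle(_cx: idle::Context) -> ! {
    loop {
        // Wait For Interrupt: low-power sleep; execution resumes on any interrupt.
        cortex_m::asm::wfi();
    }
}
```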
@@ -24,5 +24,8 @@ Running the example will print `init` to the console and then exit the QEMU proc
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example init
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/init.run}}
 ```
@@ -11,6 +11,9 @@ RTIC is designed with resource efficiency in mind. RTIC itself does not rely on
 For a minimal example you can expect something like:
 ``` console
 $ cargo size --example smallest --target thumbv7m-none-eabi --release
+```
+
+``` console
    Finished release [optimized] target(s) in 0.07s
   text    data     bss     dec     hex filename
    924       0       0     924     39c smallest
@@ -2,7 +2,7 @@
 
 Channels can be used to communicate data between running *software* tasks. The channel is essentially a wait queue, allowing tasks with multiple producers and a single receiver. A channel is constructed in the `init` task and backed by statically allocated memory. Send and receive endpoints are distributed to *software* tasks:
 
-```rust
+``` rust
 ...
 const CAPACITY: usize = 5;
 #[init]
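To make the elided `init` above concrete, here is a hedged sketch of creating the channel endpoints and handing them to the tasks named in this chapter. The `make_channel!` macro and the `rtic_sync` paths are assumptions about the channel crate, not content from the diff.

``` rust
use rtic_sync::{channel::{Receiver, Sender}, make_channel};

const CAPACITY: usize = 5;

#[init]
fn init(_cx: init::Context) -> (Shared, Local) {
    // Channel backed by statically allocated memory, as the text describes.
    let (sender, receiver) = make_channel!(u32, CAPACITY);

    // Single receiver, clonable senders (multiple producers).
    receiver::spawn(receiver).unwrap();
    sender1::spawn(sender.clone()).unwrap();

    (Shared {}, Local {})
}
```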
@@ -20,7 +20,7 @@ In this case the channel holds data of `u32` type with a capacity of 5 elements
 
 The `send` method posts a message on the channel as shown below:
 
-```rust
+``` rust
 #[task]
 async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY>) {
     hprintln!("Sender 1 sending: 1");
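The snippet in the hunk stops just before the send itself; a hedged completion is sketched below. The exact `send(1).await` call returning a `Result` is an assumption based on the error cases discussed later in this chapter.

``` rust
#[task]
async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY>) {
    hprintln!("Sender 1 sending: 1");
    // Suspends while the channel is full; errs only if the receiver is gone.
    sender.send(1).await.unwrap();
}
```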
@@ -32,7 +32,7 @@ async fn sender1(_c: sender1::Context, mut sender: Sender<'static, u32, CAPACITY
 
 The receiver can `await` incoming messages:
 
-```rust
+``` rust
 #[task]
 async fn receiver(_c: receiver::Context, mut receiver: Receiver<'static, u32, CAPACITY>) {
     while let Ok(val) = receiver.recv().await {
@@ -42,6 +42,8 @@ async fn receiver(_c: receiver::Context, mut receiver: Receiver<'static, u32, CA
 }
 ```
 
+Channels are implemented using a small (global) *Critical Section* (CS) for protection against race conditions. The user must provide a CS implementation; compiling the examples with `--features test-critical-section` selects one possible implementation.
+
 For a complete example:
 
 ``` rust
@@ -50,6 +52,9 @@ For a complete example:
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example async-channel --features test-critical-section
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/async-channel.run}}
 ```
 
@@ -80,6 +85,9 @@ In case all senders have been dropped `await` on an empty receiver channel resul
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example async-channel-no-sender --features test-critical-section
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/async-channel-no-sender.run}}
 ```
 
@@ -93,6 +101,9 @@ The resulting error returns the data back to the sender, allowing the sender to
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example async-channel-no-receiver --features test-critical-section
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/async-channel-no-receiver.run}}
 ```
 
@@ -108,5 +119,8 @@ In cases you wish the sender to proceed even in case the channel is full. To tha
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example async-channel-try --features test-critical-section
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/async-channel-try.run}}
 ```
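A hedged sketch of the non-blocking send the `async-channel-try` example exercises; the `try_send` method name is an assumption inferred from the example's name.

``` rust
#[task]
async fn sender(_c: sender::Context, mut sender: Sender<'static, u32, CAPACITY>) {
    // Returns an error immediately when the channel is full instead of suspending.
    if sender.try_send(42).is_err() {
        hprintln!("channel full (or receiver gone), value dropped");
    }
}
```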
@@ -4,7 +4,7 @@ A convenient way to express *minimal* timing requirements is by means of delay
 
 This can be achieved by instantiating a monotonic timer:
 
-```rust
+``` rust
 ...
 rtic_monotonics::make_systick_timer_queue!(TIMER);
 
@@ -17,7 +17,7 @@ fn init(cx: init::Context) -> (Shared, Local) {
 
 A *software* task can `await` the delay to expire:
 
-```rust
+``` rust
 #[task]
 async fn foo(_cx: foo::Context) {
     ...
@@ -27,6 +27,8 @@ async fn foo(_cx: foo::Context) {
 
 Technically, the timer queue is implemented as a list-based priority queue, where list nodes are statically allocated as part of the underlying task `Future`. Thus, the timer queue is infallible at run-time (its size and allocation are determined at compile time).
 
+Similar to the channel implementation, the timer-queue implementation relies on a global *Critical Section* (CS) for race protection. For the examples, a CS implementation is provided by adding `--features test-critical-section` to the build options.
+
 For a complete example:
 
 ``` rust
@@ -35,6 +37,9 @@ For a complete example:
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example async-delay --features test-critical-section
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/async-delay.run}}
 ```
 
@@ -112,5 +117,8 @@ The complete example:
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example async-timeout --features test-critical-section
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/async-timeout.run}}
 ```
@@ -25,5 +25,8 @@ The example below demonstrates the use of the `#[task(binds = InterruptName)]` a
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example hardware
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/hardware.run}}
 ```
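For context, a minimal sketch of the `binds` attribute this chapter demonstrates. `UART0` is an illustrative interrupt name, not one taken from the diff; the body assumes `cortex_m_semihosting::hprintln` as in the book's examples.

``` rust
#[task(binds = UART0, priority = 2)]
fn on_uart0(_cx: on_uart0::Context) {
    // Runs as the UART0 interrupt handler; hardware tasks run to completion.
    hprintln!("UART0 fired");
}
```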
@@ -33,6 +33,9 @@ Running the example:
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example locals
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/locals.run}}
 ```
 
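A hedged sketch of the kind of task-local resource the `locals` example covers; the inline `local = [count: u32 = 0]` initialiser follows the book's syntax, and the task name is illustrative.

``` rust
#[task(local = [count: u32 = 0])]
async fn tick(cx: tick::Context) {
    // `count` is owned by this task alone and persists across invocations.
    *cx.local.count += 1;
    hprintln!("tick #{}", cx.local.count);
}
```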
@@ -79,6 +82,9 @@ In the example below we have three interrupt handlers with priorities ranging fr
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example lock
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/lock.run}}
 ```
 
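A minimal sketch of the `lock` API this chapter is about; the resource name `counter` and the interrupt name are illustrative assumptions.

``` rust
#[task(binds = UART0, priority = 1, shared = [counter])]
fn low_prio(mut cx: low_prio::Context) {
    cx.shared.counter.lock(|counter| {
        // Exclusive access: other tasks contending for `counter` cannot
        // preempt while this closure runs (priority-ceiling protocol).
        *counter += 1;
        hprintln!("counter = {}", counter);
    });
}
```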
@@ -94,6 +100,9 @@ As an extension to `lock`, and to reduce rightward drift, locks can be taken as
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example multilock
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/multilock.run}}
 ```
 
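A hedged sketch of the tuple form of `lock` described above, taking several resources in one combined critical section; names are illustrative.

``` rust
#[task(binds = UART1, priority = 1, shared = [a, b, c])]
fn update(cx: update::Context) {
    // One closure instead of three nested `lock` calls.
    (cx.shared.a, cx.shared.b, cx.shared.c).lock(|a, b, c| {
        *a += 1;
        *b += *a;
        *c += *b;
    });
}
```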
@@ -113,6 +122,9 @@ In the example below a key (e.g. a cryptographic key) is loaded (or created) at
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example only-shared-access
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/only-shared-access.run}}
 ```
 
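A hedged sketch of read-only shared access of the kind the `only-shared-access` example shows; the `&key` form in the `shared` list follows the book, the names are illustrative.

``` rust
#[task(binds = UART0, shared = [&key])]
fn use_key(cx: use_key::Context) {
    // `&key` hands out a plain shared reference, so no `lock` is needed.
    let key = cx.shared.key;
    hprintln!("key: {:#x}", key);
}
```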
@@ -136,5 +148,8 @@ Using `#[lock_free]` on resources shared by tasks running at different prioritie
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example lock-free
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/lock-free.run}}
 ```
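For contrast with the rejected case above, a hedged sketch of a `#[lock_free]` resource whose users all run at the same priority; interrupt names and the resource type are illustrative.

``` rust
#[shared]
struct Shared {
    // Accepted only because every task using `counter` has priority 1,
    // so accesses can never preempt each other.
    #[lock_free]
    counter: u64,
}

#[task(binds = GPIOA, priority = 1, shared = [counter])]
fn gpioa(cx: gpioa::Context) {
    *cx.shared.counter += 1;
}

#[task(binds = GPIOB, priority = 1, shared = [counter])]
fn gpiob(cx: gpiob::Context) {
    *cx.shared.counter += 1;
}
```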
@@ -29,6 +29,9 @@ See the following example:
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example spawn
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/spawn.run}}
 ```
 You may `spawn` a *software* task again, given that it has run to completion (returned).
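A minimal sketch of the spawn pattern this chapter describes: `init` queues a software task that starts once `init` returns. Task names and the body are illustrative.

``` rust
#[init]
fn init(_cx: init::Context) -> (Shared, Local) {
    foo::spawn().unwrap();
    (Shared {}, Local {})
}

#[task]
async fn foo(_cx: foo::Context) {
    hprintln!("hello from foo");
}
```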
@@ -43,6 +46,9 @@ Technically the async executor will `poll` the `foo` *future* which in this case
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example spawn_loop
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/spawn_loop.run}}
 ```
 
@@ -56,6 +62,9 @@ Technically, a `spawn` to a *future* that is not in *completed* state is conside
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example spawn_err
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/spawn_err.run}}
 ```
 
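A hedged sketch of the failure mode described above: a second `spawn` while the first `foo` is still pending returns an `Err` instead of queueing another instance. The triggering hardware task is illustrative.

``` rust
#[task(binds = UART0)]
fn trigger(_cx: trigger::Context) {
    foo::spawn().unwrap();
    // `foo` has not run to completion yet, so the second spawn is rejected.
    if foo::spawn().is_err() {
        hprintln!("foo already spawned");
    }
}
```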
@@ -68,6 +77,9 @@ You can also pass arguments at spawn as follows.
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example spawn_arguments
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/spawn_arguments.run}}
 ```
 
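A minimal sketch of passing arguments at spawn time; task and argument names are illustrative.

``` rust
#[task]
async fn echo(_cx: echo::Context, x: u32, y: u32) {
    hprintln!("echo: {} {}", x, y);
}

#[task(binds = UART0)]
fn trigger(_cx: trigger::Context) {
    // The arguments are moved into the task's `Future`.
    echo::spawn(1, 2).unwrap();
}
```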
@@ -86,6 +98,9 @@ Conceptually, one can see such tasks as running in the `main` thread of the appl
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example zero-prio-task
+```
+
+``` console
 {{#include ../../../../rtic/ci/expected/zero-prio-task.run}}
 ```
 
@@ -1,14 +1,15 @@
 # Resource destructuring
 
 Destructuring task resources might help readability if a task takes multiple
-resources.
-Here are two examples on how to split up the resource struct:
+resources. Here are two examples on how to split up the resource struct:
 
 ``` rust
-{{#include ../../../../examples/destructure.rs}}
+{{#include ../../../../rtic/examples/destructure.rs}}
 ```
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example destructure
-{{#include ../../../../ci/expected/destructure.run}}
+```
+``` console
+{{#include ../../../../rtic/ci/expected/destructure.run}}
 ```
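A hedged sketch of the destructuring pattern this chapter is about; the generated `uart1::SharedResources` struct name and the `..` rest pattern are assumptions about what the `app` macro emits.

``` rust
#[task(binds = UART1, shared = [&a, &b, &c])]
fn uart1(cx: uart1::Context) {
    // Bind all three resources in one pattern instead of repeating `cx.shared.*`.
    let uart1::SharedResources { a, b, c, .. } = cx.shared;
    hprintln!("a = {}, b = {}, c = {}", a, b, c);
}
```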
@@ -1,31 +1,26 @@
 # Using indirection for faster message passing
 
-Message passing always involves copying the payload from the sender into a
-static variable and then from the static variable into the receiver. Thus
-sending a large buffer, like a `[u8; 128]`, as a message involves two expensive
+Message passing always involves copying the payload from the sender into a static variable and then from the static variable into the receiver. Thus sending a large buffer, like a `[u8; 128]`, as a message involves two expensive
 `memcpy`s.
 
-Indirection can minimize message passing overhead:
-instead of sending the buffer by value, one can send an owning pointer into the
-buffer.
+Indirection can minimize message passing overhead: instead of sending the buffer by value, one can send an owning pointer into the buffer.
 
-One can use a global memory allocator to achieve indirection (`alloc::Box`,
-`alloc::Rc`, etc.), which requires using the nightly channel as of Rust v1.37.0,
-or one can use a statically allocated memory pool like [`heapless::Pool`].
+One can use a global memory allocator to achieve indirection (`alloc::Box`, `alloc::Rc`, etc.), which requires using the nightly channel as of Rust v1.37.0, or one can use a statically allocated memory pool like [`heapless::Pool`].
 
 [`heapless::Pool`]: https://docs.rs/heapless/0.5.0/heapless/pool/index.html
 
-As this example of approach goes completely outside of RTIC resource
-model with shared and local the program would rely on the correctness
-of the memory allocator, in this case `heapless::pool`.
+As this approach goes completely outside of the RTIC resource model (shared and local resources), the program relies on the correctness of the memory allocator, in this case `heapless::pool`.
 
 Here's an example where `heapless::Pool` is used to "box" buffers of 128 bytes.
 
 ``` rust
-{{#include ../../../../examples/pool.rs}}
+{{#include ../../../../rtic/examples/pool.rs}}
 ```
 
 ``` console
 $ cargo run --target thumbv7m-none-eabi --example pool
-{{#include ../../../../ci/expected/pool.run}}
+```
+
+``` console
+{{#include ../../../../rtic/ci/expected/pool.run}}
 ```
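Before the code diffs to `pool.rs` below, a hedged sketch of the `heapless` 0.7 singleton-pool API the chapter leans on (`pool!`, `grow`, `alloc`, `init`). The memory size and the free functions here are illustrative, not part of the example.

``` rust
use heapless::pool::singleton::Box;

// One global pool `P` handing out 128-byte blocks.
heapless::pool!(P: [u8; 128]);

fn setup(memory: &'static mut [u8; 1024]) {
    // Give the pool its backing storage; must happen before any `alloc`.
    P::grow(memory);
}

fn produce() -> Box<P> {
    // Claim a block and initialise it; afterwards only the small handle moves,
    // not the 128-byte payload.
    P::alloc().unwrap().init([0u8; 128])
}
```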
@@ -4,6 +4,7 @@
 #![deny(warnings)]
 #![no_main]
 #![no_std]
+#![feature(type_alias_impl_trait)]
 
 use heapless::{
     pool,
@@ -41,7 +42,7 @@ mod app {
     }
 
     #[task(binds = I2C0, priority = 2)]
-    async fn i2c0(_: i2c0::Context) {
+    fn i2c0(_: i2c0::Context) {
         // claim a memory block, initialize it and ..
         let x = P::alloc().unwrap().init([0u8; 128]);
 
@@ -56,7 +57,7 @@ mod app {
 
     #[task]
     async fn foo(_: foo::Context, x: Box<P>) {
-        hprintln!("foo({:?})", x.as_ptr()).unwrap();
+        hprintln!("foo({:?})", x.as_ptr());
 
         // explicitly return the block to the pool
         drop(x);
@@ -66,7 +67,7 @@ mod app {
 
     #[task(priority = 2)]
     async fn bar(_: bar::Context, x: Box<P>) {
-        hprintln!("bar({:?})", x.as_ptr()).unwrap();
+        hprintln!("bar({:?})", x.as_ptr());
 
         // this is done automatically so we can omit the call to `drop`
         // drop(x);