Broke out async dispatchers into their own place
This commit is contained in:
parent b1d499a744
commit 1341cc5bbe
18 changed files with 419 additions and 266 deletions
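The heart of the change: the analysis pass now partitions software-task priorities by `is_async` and assigns one dispatcher interrupt per priority level for each class (`interrupts_normal` vs `interrupts_async`), and the async dispatchers are generated from a new `macros/src/codegen/async_dispatchers.rs`. Below is a minimal, hedged sketch of that partitioning using plain `u8`/`String` stand-ins for the real `rtic-syntax` types; `Task` and `split_dispatchers` are illustrative names, not the crate's API.

```rust
use std::collections::{BTreeMap, BTreeSet};

// Illustrative stand-in for a software task's relevant attributes.
struct Task {
    priority: u8,
    is_async: bool,
}

// Assign one dispatcher interrupt per priority level, separately for sync
// (queue-based) and async (executor-based) software tasks, drawing from the
// same pool of `dispatchers = [...]` interrupts.
fn split_dispatchers(
    tasks: &[Task],
    mut free_interrupts: Vec<String>,
) -> (BTreeMap<u8, String>, BTreeMap<u8, String>) {
    let sync_prios: BTreeSet<u8> = tasks
        .iter()
        .filter(|t| !t.is_async)
        .map(|t| t.priority)
        .collect();
    let async_prios: BTreeSet<u8> = tasks
        .iter()
        .filter(|t| t.is_async)
        .map(|t| t.priority)
        .collect();

    // Each priority level of each class consumes one interrupt from the pool.
    let interrupts_normal: BTreeMap<u8, String> = sync_prios
        .into_iter()
        .rev()
        .map(|p| (p, free_interrupts.pop().expect("not enough interrupts")))
        .collect();
    let interrupts_async: BTreeMap<u8, String> = async_prios
        .into_iter()
        .rev()
        .map(|p| (p, free_interrupts.pop().expect("not enough interrupts")))
        .collect();

    (interrupts_normal, interrupts_async)
}

fn main() {
    let tasks = [
        Task { priority: 1, is_async: false },
        Task { priority: 2, is_async: true },
    ];
    let pool = vec![
        "SSI0".to_string(),
        "QEI0".to_string(),
        "UART0".to_string(),
        "UART1".to_string(),
    ];
    let (normal, asynchronous) = split_dispatchers(&tasks, pool);
    println!("sync dispatchers: {:?}, async dispatchers: {:?}", normal, asynchronous);
}
```

As in the real analysis, both maps pop from the same pool of extern interrupts, which is why the check further down now counts sync and async priority levels separately.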
@@ -10,7 +10,7 @@ use panic_semihosting as _;
// task can have a mutable reference stored.
// - Spawning an async task equates to it being polled once.

#[rtic::app(device = lm3s6965, dispatchers = [SSI0, UART0], peripherals = true)]
#[rtic::app(device = lm3s6965, dispatchers = [SSI0, QEI0, UART0, UART1], peripherals = true)]
mod app {
use cortex_m_semihosting::{debug, hprintln};
use systick_monotonic::*;
@@ -11,7 +11,8 @@ use syn::Ident;
/// Extend the upstream `Analysis` struct with our field
pub struct Analysis {
parent: P<analyze::Analysis>,
pub interrupts: BTreeMap<Priority, (Ident, ExternInterrupt)>,
pub interrupts_normal: BTreeMap<Priority, (Ident, ExternInterrupt)>,
pub interrupts_async: BTreeMap<Priority, (Ident, ExternInterrupt)>,
}

impl ops::Deref for Analysis {
@@ -24,24 +25,42 @@ impl ops::Deref for Analysis {

// Assign an interrupt to each priority level
pub fn app(analysis: P<analyze::Analysis>, app: &App) -> P<Analysis> {
let mut available_interrupt = app.args.extern_interrupts.clone();

// the set of priorities (each priority only once)
let priorities = app
.software_tasks
.values()
.filter(|task| !task.is_async)
.map(|task| task.args.priority)
.collect::<BTreeSet<_>>();

let priorities_async = app
.software_tasks
.values()
.filter(|task| task.is_async)
.map(|task| task.args.priority)
.collect::<BTreeSet<_>>();

// map from priorities to interrupts (holding name and attributes)
let interrupts: BTreeMap<Priority, _> = priorities
let interrupts_normal: BTreeMap<Priority, _> = priorities
.iter()
.copied()
.rev()
.zip(&app.args.extern_interrupts)
.map(|(p, (id, ext))| (p, (id.clone(), ext.clone())))
.map(|p| (p, available_interrupt.pop().expect("UNREACHABLE")))
.collect();

let interrupts_async: BTreeMap<Priority, _> = priorities_async
.iter()
.copied()
.rev()
.map(|p| (p, available_interrupt.pop().expect("UNREACHABLE")))
.collect();

P::new(Analysis {
parent: analysis,
interrupts,
interrupts_normal,
interrupts_async,
})
}
@@ -36,28 +36,39 @@ pub fn app(app: &App, _analysis: &Analysis) -> parse::Result<Extra> {
.iter()
.map(|(name, task)| {
first = Some(name);
task.args.priority
(task.args.priority, task.is_async)
})
.collect::<HashSet<_>>();

let need = priorities
let need_sync = priorities
.iter()
// Only count if not 0
.filter(|prio| **prio > 0)
// Only count if not 0 and not async
.filter(|(prio, is_async)| *prio > 0 && !*is_async)
.count();

let need_async = priorities
.iter()
// Only count if not 0 and async
.filter(|(prio, is_async)| *prio > 0 && *is_async)
.count();

let given = app.args.extern_interrupts.len();
if need > given {
if need_sync + need_async > given {
let s = {
format!(
"not enough interrupts to dispatch \
all software tasks (need: {}; given: {})",
need, given
"not enough interrupts to dispatch all software and async tasks \
(need: {}; given: {}) - one interrupt is needed per priority and sync/async task",
need_sync + need_async,
given
)
};

// If not enough tasks and first still is None, may cause
// "custom attribute panicked" due to unwrap on None
return Err(parse::Error::new(first.unwrap().span(), &s));
return Err(parse::Error::new(
first.expect("RTIC-ICE: needed async + needed sync").span(),
&s,
));
}

// Check that all exceptions are valid; only exceptions with configurable priorities are
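To make the new requirement concrete, here is a hedged, host-side sketch of the updated check: distinct priority levels are counted separately for sync and async tasks, and the sum must not exceed the number of `dispatchers = [...]` interrupts the user provided. The types and the `enough_dispatchers` helper are simplified stand-ins, not the actual `check.rs` signatures.

```rust
use std::collections::HashSet;

// `task_prios` holds one `(priority, is_async)` pair per software task.
fn enough_dispatchers(task_prios: &[(u8, bool)], given: usize) -> Result<(), String> {
    let priorities: HashSet<(u8, bool)> = task_prios.iter().copied().collect();

    // Priority-0 tasks are not counted, matching the original check.
    let need_sync = priorities
        .iter()
        .filter(|(prio, is_async)| *prio > 0 && !*is_async)
        .count();
    let need_async = priorities
        .iter()
        .filter(|(prio, is_async)| *prio > 0 && *is_async)
        .count();

    if need_sync + need_async > given {
        Err(format!(
            "not enough interrupts to dispatch all software and async tasks (need: {}; given: {})",
            need_sync + need_async,
            given
        ))
    } else {
        Ok(())
    }
}

fn main() {
    // Two sync tasks at priority 1 share one dispatcher; the async task needs its own.
    assert!(enough_dispatchers(&[(1, false), (1, false), (2, true)], 2).is_ok());
    // A sync and an async task at the same priority now need two dispatchers.
    assert!(enough_dispatchers(&[(1, false), (1, true)], 1).is_err());
}
```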
@@ -5,6 +5,7 @@ use rtic_syntax::ast::App;
use crate::{analyze::Analysis, check::Extra};

mod assertions;
mod async_dispatchers;
mod dispatchers;
mod hardware_tasks;
mod idle;
@@ -99,6 +100,7 @@ pub fn app(app: &App, analysis: &Analysis, extra: &Extra) -> TokenStream2 {
let monotonics = monotonic::codegen(app, analysis, extra);

let mod_app_dispatchers = dispatchers::codegen(app, analysis, extra);
let mod_app_async_dispatchers = async_dispatchers::codegen(app, analysis, extra);
let mod_app_timer_queue = timer_queue::codegen(app, analysis, extra);
let user_imports = &app.user_imports;
let user_code = &app.user_code;
@@ -150,6 +152,8 @@ pub fn app(app: &App, analysis: &Analysis, extra: &Extra) -> TokenStream2 {

#(#mod_app_dispatchers)*

#(#mod_app_async_dispatchers)*

#(#mod_app_timer_queue)*

#(#mains)*
macros/src/codegen/async_dispatchers.rs (new file, 130 lines)
@@ -0,0 +1,130 @@
use proc_macro2::TokenStream as TokenStream2;
use quote::quote;
use rtic_syntax::ast::App;

use crate::{analyze::Analysis, check::Extra, codegen::util};

/// Generates task dispatchers
pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream2> {
let mut items = vec![];

let interrupts = &analysis.interrupts_async;

// Generate executor definition and priority in global scope
for (name, task) in app.software_tasks.iter() {
if task.is_async {
let type_name = util::internal_task_ident(name, "F");
let exec_name = util::internal_task_ident(name, "EXEC");
let prio_name = util::internal_task_ident(name, "PRIORITY");

items.push(quote!(
#[allow(non_camel_case_types)]
type #type_name = impl core::future::Future + 'static;
#[allow(non_upper_case_globals)]
static #exec_name:
rtic::RacyCell<rtic::export::executor::AsyncTaskExecutor<#type_name>> =
rtic::RacyCell::new(rtic::export::executor::AsyncTaskExecutor::new());

// The executor's priority, this can be any value - we will overwrite it when we
// start a task
#[allow(non_upper_case_globals)]
static #prio_name: rtic::RacyCell<rtic::export::Priority> =
unsafe { rtic::RacyCell::new(rtic::export::Priority::new(0)) };
));
}
}

for (&level, channel) in &analysis.channels {
if channel
.tasks
.iter()
.map(|task_name| !app.software_tasks[task_name].is_async)
.all(|is_not_async| is_not_async)
{
// check if all tasks are not async, if so don't generate this.
continue;
}

let mut stmts = vec![];
let device = &extra.device;
let enum_ = util::interrupt_ident();
let interrupt = util::suffixed(&interrupts[&level].0.to_string());

for name in channel
.tasks
.iter()
.filter(|name| app.software_tasks[*name].is_async)
{
let exec_name = util::internal_task_ident(name, "EXEC");
let prio_name = util::internal_task_ident(name, "PRIORITY");
let task = &app.software_tasks[name];
// let cfgs = &task.cfgs;
let (_, tupled, pats, input_types) = util::regroup_inputs(&task.inputs);
let executor_run_ident = util::executor_run_ident(name);

let n = util::capacity_literal(channel.capacity as usize + 1);
let rq = util::rq_async_ident(name);
let (rq_ty, rq_expr) = {
(
quote!(rtic::export::ASYNCRQ<#input_types, #n>),
quote!(rtic::export::Queue::new()),
)
};

items.push(quote!(
#[doc(hidden)]
#[allow(non_camel_case_types)]
#[allow(non_upper_case_globals)]
static #rq: rtic::RacyCell<#rq_ty> = rtic::RacyCell::new(#rq_expr);
));

stmts.push(quote!(
if !(&mut *#exec_name.get_mut()).is_running() {
if let Some(#tupled) = rtic::export::interrupt::free(|_| (&mut *#rq.get_mut()).dequeue()) {

// The async executor needs a static priority
#prio_name.get_mut().write(rtic::export::Priority::new(PRIORITY));
let priority: &'static _ = &*#prio_name.get();

(&mut *#exec_name.get_mut()).spawn(#name(#name::Context::new(priority) #(,#pats)*));
#executor_run_ident.store(true, core::sync::atomic::Ordering::Relaxed);
}
}

if #executor_run_ident.load(core::sync::atomic::Ordering::Relaxed) {
#executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed);
if (&mut *#exec_name.get_mut()).poll(|| {
#executor_run_ident.store(true, core::sync::atomic::Ordering::Release);
rtic::pend(#device::#enum_::#interrupt);
}) && !rtic::export::interrupt::free(|_| (&*#rq.get_mut()).is_empty()) {
// If the ready queue is not empty and the executor finished, restart this
// dispatch to check if the executor should be restarted.
rtic::pend(#device::#enum_::#interrupt);
}
}
));
}

let doc = format!(
"Interrupt handler to dispatch async tasks at priority {}",
level
);
let attribute = &interrupts[&level].1.attrs;
items.push(quote!(
#[allow(non_snake_case)]
#[doc = #doc]
#[no_mangle]
#(#attribute)*
unsafe fn #interrupt() {
/// The priority of this interrupt handler
const PRIORITY: u8 = #level;

rtic::export::run(PRIORITY, || {
#(#stmts)*
});
}
));
}

items
}
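For orientation, a hedged host-side sketch of the dispatch loop that the generated async dispatcher performs for one task: start a spawn only when the task's executor is idle, poll only when the run flag is set, and pend the dispatcher again whenever the waker fires or more spawns are queued. `Executor`, `pend`, `dispatch`, and the `VecDeque` are simplified stand-ins for `AsyncTaskExecutor`, `rtic::pend`, the generated handler body, and the `ASYNCRQ` queue.

```rust
use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, Ordering};

// Simplified stand-in for the per-task AsyncTaskExecutor.
struct Executor {
    running: bool,
}

impl Executor {
    fn is_running(&self) -> bool {
        self.running
    }
    fn spawn(&mut self, _input: u32) {
        self.running = true;
    }
    // Poll the stored future; `wake` is what the waker would call.
    // Returns true when the future completed and the executor is free again.
    fn poll(&mut self, _wake: impl Fn()) -> bool {
        self.running = false;
        true
    }
}

// Stand-in for the per-task "executor should run" flag.
static RUN_FLAG: AtomicBool = AtomicBool::new(false);

// Stand-in for rtic::pend(dispatcher_interrupt).
fn pend() {
    println!("pend async dispatcher interrupt");
}

fn dispatch(exec: &mut Executor, ready_queue: &mut VecDeque<u32>) {
    // Only start a new task instance if the executor is idle.
    if !exec.is_running() {
        if let Some(input) = ready_queue.pop_front() {
            exec.spawn(input);
            RUN_FLAG.store(true, Ordering::Relaxed);
        }
    }

    // Poll only when the run flag was set (by the spawn above or by the waker).
    if RUN_FLAG.swap(false, Ordering::Relaxed) {
        let finished = exec.poll(|| {
            RUN_FLAG.store(true, Ordering::Release);
            pend();
        });
        if finished && !ready_queue.is_empty() {
            // More spawns are queued and the executor is free again: run the
            // dispatcher once more.
            pend();
        }
    }
}

fn main() {
    let mut exec = Executor { running: false };
    let mut rq: VecDeque<u32> = VecDeque::from([1, 2]);
    dispatch(&mut exec, &mut rq);
    dispatch(&mut exec, &mut rq);
}
```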
@@ -5,41 +5,28 @@ use rtic_syntax::ast::App;
use crate::{analyze::Analysis, check::Extra, codegen::util};

/// Generates task dispatchers
pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream2> {
pub fn codegen(app: &App, analysis: &Analysis, _extra: &Extra) -> Vec<TokenStream2> {
let mut items = vec![];

let interrupts = &analysis.interrupts;

// Generate executor definition and priority in global scope
for (name, task) in app.software_tasks.iter() {
if task.is_async {
let type_name = util::internal_task_ident(name, "F");
let exec_name = util::internal_task_ident(name, "EXEC");
let prio_name = util::internal_task_ident(name, "PRIORITY");

items.push(quote!(
#[allow(non_camel_case_types)]
type #type_name = impl core::future::Future + 'static;
#[allow(non_upper_case_globals)]
static #exec_name:
rtic::RacyCell<rtic::export::executor::AsyncTaskExecutor<#type_name>> =
rtic::RacyCell::new(rtic::export::executor::AsyncTaskExecutor::new());

// The executor's priority, this can be any value - we will overwrite it when we
// start a task
#[allow(non_upper_case_globals)]
static #prio_name: rtic::RacyCell<rtic::export::Priority> =
unsafe { rtic::RacyCell::new(rtic::export::Priority::new(0)) };
));
}
}
let interrupts = &analysis.interrupts_normal;

for (&level, channel) in &analysis.channels {
if channel
.tasks
.iter()
.map(|task_name| app.software_tasks[task_name].is_async)
.all(|is_async| is_async)
{
// check if all tasks are async, if so don't generate this.
continue;
}

let mut stmts = vec![];

let variants = channel
.tasks
.iter()
.filter(|name| !app.software_tasks[*name].is_async)
.map(|name| {
let cfgs = &app.software_tasks[name].cfgs;
@@ -69,6 +56,7 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream

let n = util::capacity_literal(channel.capacity as usize + 1);
let rq = util::rq_ident(level);
// let (_, _, _, input_ty) = util::regroup_inputs(inputs);
let (rq_ty, rq_expr) = {
(
quote!(rtic::export::SCRQ<#t, #n>),
@@ -88,9 +76,13 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
static #rq: rtic::RacyCell<#rq_ty> = rtic::RacyCell::new(#rq_expr);
));

let device = &extra.device;
let enum_ = util::interrupt_ident();
let interrupt = util::suffixed(&interrupts[&level].0.to_string());
let interrupt = util::suffixed(
&interrupts
.get(&level)
.expect("RTIC-ICE: Unable to get interrupt")
.0
.to_string(),
);
let arms = channel
.tasks
.iter()
@@ -100,36 +92,8 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
let fq = util::fq_ident(name);
let inputs = util::inputs_ident(name);
let (_, tupled, pats, _) = util::regroup_inputs(&task.inputs);
let exec_name = util::internal_task_ident(name, "EXEC");
let prio_name = util::internal_task_ident(name, "PRIORITY");

if task.is_async {
let executor_run_ident = util::executor_run_ident(name);

quote!(
#(#cfgs)*
#t::#name => {
if !(&mut *#exec_name.get_mut()).is_running() {
let #tupled =
(&*#inputs
.get())
.get_unchecked(usize::from(index))
.as_ptr()
.read();
(&mut *#fq.get_mut()).split().0.enqueue_unchecked(index);

// The async executor needs a static priority
#prio_name.get_mut().write(rtic::export::Priority::new(PRIORITY));
let priority: &'static _ = &*#prio_name.get();

(&mut *#exec_name.get_mut()).spawn(#name(#name::Context::new(priority) #(,#pats)*));
#executor_run_ident.store(true, core::sync::atomic::Ordering::Relaxed);
} else {
retry_queue.push_unchecked((#t::#name, index));
}
}
)
} else {
if !task.is_async {
quote!(
#(#cfgs)*
#t::#name => {
@@ -147,37 +111,12 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
)
}
)
} else {
quote!()
}
})
.collect::<Vec<_>>();

let n_executors = channel
.tasks
.iter()
.map(|name| {
let task = &app.software_tasks[name];
if task.is_async {
1
} else {
0
}
})
.sum::<usize>()
.max(1);

// TODO: This `retry_queue` comes from the current design of the dispatcher queue handling.
// To remove this we would need to redesign how the dispatcher handles queues, and this can
// be done as an optimization later.
//
// The core issue is that we should only dequeue the ready queue if the executor associated
// to the task is not running. As it is today this queue is blindly dequeued, see the
// `while let Some(...) = (&mut *#rq.get_mut())...` a few lines down. The current "hack" is
// to just requeue the executor run if it should not have been dequeued. This needs however
// to be done after the ready queue has been exhausted.
stmts.push(quote!(
let mut retry_queue: rtic::export::Vec<_, #n_executors> = rtic::export::Vec::new();
));

stmts.push(quote!(
while let Some((task, index)) = (&mut *#rq.get_mut()).split().1.dequeue() {
match task {
@@ -186,37 +125,6 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
}
));

for name in channel
.tasks
.iter()
.filter(|name| app.software_tasks[*name].is_async)
{
let exec_name = util::internal_task_ident(name, "EXEC");

let executor_run_ident = util::executor_run_ident(name);
stmts.push(quote!(
if #executor_run_ident.load(core::sync::atomic::Ordering::Relaxed) {
#executor_run_ident.store(false, core::sync::atomic::Ordering::Relaxed);
if (&mut *#exec_name.get_mut()).poll(|| {
#executor_run_ident.store(true, core::sync::atomic::Ordering::Release);
rtic::pend(#device::#enum_::#interrupt);
}) && !retry_queue.is_empty() {
// If the retry queue is not empty and the executor finished, restart this
// dispatch to check if the executor should be restarted.
rtic::pend(#device::#enum_::#interrupt);
}
}
));
}

stmts.push(quote!(
while let Some((task, index)) = retry_queue.pop() {
rtic::export::interrupt::free(|_| {
(&mut *#rq.get_mut()).enqueue_unchecked((task, index));
});
}
));

let doc = format!("Interrupt handler to dispatch tasks at priority {}", level);
let attribute = &interrupts[&level].1.attrs;
items.push(quote!(
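The removed `retry_queue` block above is the pattern the old shared dispatcher needed because sync and async tasks shared one ready queue: an entry dequeued for a still-running executor had to be parked and re-enqueued after the drain. A hedged sketch of that pattern, with hypothetical simplified types, which the new per-task async queues make unnecessary:

```rust
use std::collections::VecDeque;

#[derive(Clone, Copy, Debug, PartialEq)]
enum Task {
    A,
    B,
}

fn executor_is_running(task: Task) -> bool {
    // Stand-in: pretend task A's executor is still busy with a previous run.
    task == Task::A
}

fn dispatch(ready_queue: &mut VecDeque<(Task, u8)>) {
    let mut retry_queue: Vec<(Task, u8)> = Vec::new();

    // The shared ready queue is drained blindly...
    while let Some((task, index)) = ready_queue.pop_front() {
        if executor_is_running(task) {
            // ...so a spawn targeting a busy executor is parked instead of dropped.
            retry_queue.push((task, index));
        } else {
            println!("start {:?} with input slot {}", task, index);
        }
    }

    // After the drain, put parked entries back so a later dispatch retries them.
    for entry in retry_queue {
        ready_queue.push_back(entry);
    }
}

fn main() {
    let mut rq = VecDeque::from([(Task::A, 0), (Task::B, 0)]);
    dispatch(&mut rq);
    assert_eq!(rq, VecDeque::from([(Task::A, 0)]));
}
```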
@@ -13,7 +13,13 @@ pub fn codegen(ctxt: Context, needs_lt: &mut bool, app: &App) -> (TokenStream2,

let resources = match ctxt {
Context::Init => &app.init.args.local_resources,
Context::Idle => &app.idle.as_ref().unwrap().args.local_resources,
Context::Idle => {
&app.idle
.as_ref()
.expect("RTIC-ICE: unable to get idle name")
.args
.local_resources
}
Context::HardwareTask(name) => &app.hardware_tasks[name].args.local_resources,
Context::SoftwareTask(name) => &app.software_tasks[name].args.local_resources,
};
@@ -206,15 +206,45 @@ pub fn codegen(

let device = &extra.device;
let enum_ = util::interrupt_ident();
let interrupt = &analysis
.interrupts
let interrupt = if spawnee.is_async {
&analysis
.interrupts_async
.get(&priority)
.expect("RTIC-ICE: interrupt identifier not found")
.0;
.0
} else {
&analysis
.interrupts_normal
.get(&priority)
.expect("RTIC-ICE: interrupt identifier not found")
.0
};

let internal_spawn_ident = util::internal_task_ident(name, "spawn");

// Spawn caller
if spawnee.is_async {
let rq = util::rq_async_ident(name);
items.push(quote!(

#(#cfgs)*
/// Spawns the task directly
#[allow(non_snake_case)]
#[doc(hidden)]
pub fn #internal_spawn_ident(#(#args,)*) -> Result<(), #ty> {
let input = #tupled;

unsafe {
let r = rtic::export::interrupt::free(|_| (&mut *#rq.get_mut()).enqueue(input));

if r.is_ok() {
rtic::pend(#device::#enum_::#interrupt);
}

r
}
}));
} else {
items.push(quote!(

#(#cfgs)*
@@ -244,6 +274,7 @@ pub fn codegen(
}

}));
}

module_items.push(quote!(
#(#cfgs)*
@@ -252,6 +283,7 @@ pub fn codegen(
));

// Schedule caller
if !spawnee.is_async {
for (_, monotonic) in &app.monotonics {
let instants = util::monotonic_instants_ident(name, &monotonic.ident);
let monotonic_name = monotonic.ident.to_string();
@@ -279,9 +311,6 @@ pub fn codegen(

let tq_marker = &util::timer_queue_marker_ident();

// For future use
// let doc = format!(" RTIC internal: {}:{}", file!(), line!());
// items.push(quote!(#[doc = #doc]));
let internal_spawn_handle_ident =
util::internal_monotonics_ident(name, m, "SpawnHandle");
let internal_spawn_at_ident = util::internal_monotonics_ident(name, m, "spawn_at");
@@ -438,6 +467,7 @@ pub fn codegen(
));
}
}
}

if items.is_empty() {
quote!()
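A hedged sketch of what the newly generated async `spawn` does from the caller's side: push the task's input into that task's own bounded ready queue and, on success, pend the async dispatcher; a full queue hands the input back as `Err`. The `AsyncSpawner` struct and `pend` below are host-side stand-ins, not the generated code or the rtic API.

```rust
use std::collections::VecDeque;

// Stand-in for rtic::pend(dispatcher_interrupt).
fn pend() {
    println!("pend async dispatcher interrupt");
}

// Per-task state: one bounded ready queue per async task.
struct AsyncSpawner {
    capacity: usize,
    ready_queue: VecDeque<u32>,
}

impl AsyncSpawner {
    // Mirrors the generated `spawn`: `Err` hands the input back when the queue is full.
    fn spawn(&mut self, input: u32) -> Result<(), u32> {
        if self.ready_queue.len() >= self.capacity {
            return Err(input);
        }
        self.ready_queue.push_back(input);
        // A successful enqueue makes the dispatcher run, which feeds the executor.
        pend();
        Ok(())
    }
}

fn main() {
    let mut task = AsyncSpawner { capacity: 2, ready_queue: VecDeque::new() };
    assert!(task.spawn(1).is_ok());
    assert!(task.spawn(2).is_ok());
    assert_eq!(task.spawn(3), Err(3));
}
```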
@@ -15,6 +15,10 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream

// Populate the FreeQueue
for (name, task) in &app.software_tasks {
if task.is_async {
continue;
}

let cap = task.args.capacity;
let fq_ident = util::fq_ident(name);
@@ -38,7 +42,11 @@ pub fn codegen(app: &App, analysis: &Analysis, extra: &Extra) -> Vec<TokenStream
stmts.push(quote!(let _ = #rt_err::#interrupt::#name;));
}

let interrupt_ids = analysis.interrupts.iter().map(|(p, (id, _))| (p, id));
let interrupt_ids = analysis
.interrupts_normal
.iter()
.map(|(p, (id, _))| (p, id))
.chain(analysis.interrupts_async.iter().map(|(p, (id, _))| (p, id)));

// Unmask interrupts and set their priorities
for (&priority, name) in interrupt_ids.chain(app.hardware_tasks.values().filter_map(|task| {
@@ -112,7 +112,11 @@ pub fn codegen(
};

// Computing mapping of used interrupts to masks
let interrupt_ids = analysis.interrupts.iter().map(|(p, (id, _))| (p, id));
let interrupt_ids = analysis
.interrupts_normal
.iter()
.map(|(p, (id, _))| (p, id))
.chain(analysis.interrupts_async.iter().map(|(p, (id, _))| (p, id)));

let mut prio_to_masks = HashMap::new();
let device = &extra.device;
@@ -147,7 +151,7 @@ pub fn codegen(
None
}
})) {
let v = prio_to_masks.entry(priority - 1).or_insert(Vec::new());
let v: &mut Vec<_> = prio_to_masks.entry(priority - 1).or_default();
v.push(quote!(#device::Interrupt::#name as u32));
mask_ids.push(quote!(#device::Interrupt::#name as u32));
}
@@ -10,7 +10,13 @@ pub fn codegen(ctxt: Context, needs_lt: &mut bool, app: &App) -> (TokenStream2,

let resources = match ctxt {
Context::Init => unreachable!("Tried to generate shared resources struct for init"),
Context::Idle => &app.idle.as_ref().unwrap().args.shared_resources,
Context::Idle => {
&app.idle
.as_ref()
.expect("RTIC-ICE: unable to get idle name")
.args
.shared_resources
}
Context::HardwareTask(name) => &app.hardware_tasks[name].args.shared_resources,
Context::SoftwareTask(name) => &app.software_tasks[name].args.shared_resources,
};
@@ -36,6 +36,7 @@ pub fn codegen(
let cap_lit = util::capacity_literal(cap as usize);
let cap_lit_p1 = util::capacity_literal(cap as usize + 1);

if !task.is_async {
// Create free queues and inputs / instants buffers
let fq = util::fq_ident(name);
@@ -47,6 +48,7 @@ pub fn codegen(
Box::new(|| Some(util::link_section_uninit())),
)
};

mod_app.push(quote!(
// /// Queue version of a free-list that keeps track of empty slots in
// /// the following buffers
@@ -83,15 +85,17 @@ pub fn codegen(
let uninit = mk_uninit();
let inputs_ident = util::inputs_ident(name);

// Buffer that holds the inputs of a task
mod_app.push(quote!(
#uninit
// /// Buffer that holds the inputs of a task
#[allow(non_camel_case_types)]
#[allow(non_upper_case_globals)]
#[doc(hidden)]
static #inputs_ident: rtic::RacyCell<[core::mem::MaybeUninit<#input_ty>; #cap_lit]> =
rtic::RacyCell::new([#(#elems,)*]);
));
}

if task.is_async {
let executor_ident = util::executor_run_ident(name);
mod_app.push(quote!(
@@ -26,6 +26,7 @@ pub fn codegen(app: &App, analysis: &Analysis, _extra: &Extra) -> Vec<TokenStrea
let variants = app
.software_tasks
.iter()
.filter(|(_, task)| !task.is_async)
.map(|(name, task)| {
let cfgs = &task.cfgs;
@@ -103,6 +104,7 @@ pub fn codegen(app: &App, analysis: &Analysis, _extra: &Extra) -> Vec<TokenStrea
let arms = app
.software_tasks
.iter()
.filter(|(_, task)| !task.is_async)
.map(|(name, task)| {
let cfgs = &task.cfgs;
let priority = task.args.priority;
@@ -110,7 +112,7 @@ pub fn codegen(app: &App, analysis: &Analysis, _extra: &Extra) -> Vec<TokenStrea
let rqt = util::spawn_t_ident(priority);

// The interrupt that runs the task dispatcher
let interrupt = &analysis.interrupts.get(&priority).expect("RTIC-ICE: interrupt not found").0;
let interrupt = &analysis.interrupts_normal.get(&priority).expect("RTIC-ICE: interrupt not found").0;

let pend = {
quote!(
@@ -184,7 +184,12 @@ pub fn regroup_inputs(
pub fn get_task_name(ctxt: Context, app: &App) -> Ident {
let s = match ctxt {
Context::Init => app.init.name.to_string(),
Context::Idle => app.idle.as_ref().unwrap().name.to_string(),
Context::Idle => app
.idle
.as_ref()
.expect("RTIC-ICE: unable to find idle name")
.name
.to_string(),
Context::HardwareTask(ident) | Context::SoftwareTask(ident) => ident.to_string(),
};

@@ -195,7 +200,12 @@ pub fn get_task_name(ctxt: Context, app: &App) -> Ident {
pub fn shared_resources_ident(ctxt: Context, app: &App) -> Ident {
let mut s = match ctxt {
Context::Init => app.init.name.to_string(),
Context::Idle => app.idle.as_ref().unwrap().name.to_string(),
Context::Idle => app
.idle
.as_ref()
.expect("RTIC-ICE: unable to find idle name")
.name
.to_string(),
Context::HardwareTask(ident) | Context::SoftwareTask(ident) => ident.to_string(),
};

@@ -208,7 +218,12 @@ pub fn shared_resources_ident(ctxt: Context, app: &App) -> Ident {
pub fn local_resources_ident(ctxt: Context, app: &App) -> Ident {
let mut s = match ctxt {
Context::Init => app.init.name.to_string(),
Context::Idle => app.idle.as_ref().unwrap().name.to_string(),
Context::Idle => app
.idle
.as_ref()
.expect("RTIC-ICE: unable to find idle name")
.name
.to_string(),
Context::HardwareTask(ident) | Context::SoftwareTask(ident) => ident.to_string(),
};

@@ -225,6 +240,11 @@ pub fn rq_ident(priority: u8) -> Ident {
mark_internal_name(&format!("P{}_RQ", priority))
}

/// Generates an identifier for a ready queue, async task version
pub fn rq_async_ident(async_task_name: &Ident) -> Ident {
mark_internal_name(&format!("ASYNC_TACK_{}_RQ", async_task_name))
}

/// Generates an identifier for the `enum` of `schedule`-able tasks
pub fn schedule_t_ident() -> Ident {
mark_internal_name("SCHED_T")
@@ -33,8 +33,8 @@ fn analyze() {
.unwrap();

let analysis = crate::analyze::app(analysis, &app);
let interrupts = &analysis.interrupts;
let interrupts = &analysis.interrupts_normal;
assert_eq!(interrupts.len(), 2);
assert_eq!(interrupts[&2].0.to_string(), "B");
assert_eq!(interrupts[&1].0.to_string(), "A");
assert_eq!(interrupts[&2].0.to_string(), "A");
assert_eq!(interrupts[&1].0.to_string(), "B");
}
@@ -145,6 +145,7 @@ pub mod executor {

pub type SCFQ<const N: usize> = Queue<u8, N>;
pub type SCRQ<T, const N: usize> = Queue<(T, u8), N>;
pub type ASYNCRQ<T, const N: usize> = Queue<T, N>;

/// Mask is used to store interrupt masks on systems without a BASEPRI register (M0, M0+, M23).
/// It needs to be large enough to cover all the relevant interrupts in use.
@@ -1,5 +1,5 @@
error: not enough interrupts to dispatch all software tasks (need: 1; given: 0)
--> $DIR/extern-interrupt-not-enough.rs:17:8
error: not enough interrupts to dispatch all software and async tasks (need: 1; given: 0) - one interrupt is needed per priority and sync/async task
--> ui/extern-interrupt-not-enough.rs:17:8
|
17 | fn a(_: a::Context) {}
| ^
@@ -12,4 +12,4 @@ error[E0080]: evaluation of constant value failed
3 | #[rtic::app(device = lm3s6965)]
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ the evaluated program panicked at 'Maximum priority used by interrupt vector 'I2C0' is more than supported by hardware', $DIR/ui/task-priority-too-high.rs:3:1
|
= note: this error originates in the macro `$crate::panic::panic_2021` (in Nightly builds, run with -Z macro-backtrace for more info)
= note: this error originates in the macro `$crate::panic::panic_2021` which comes from the expansion of the macro `::core::panic` (in Nightly builds, run with -Z macro-backtrace for more info)