diff --git a/Cargo.toml b/Cargo.toml
index 1b094af..910e668 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,6 +36,7 @@
 cortex-m = "0.6"
 embedded-hal = { version = "0.2", features = ["unproven"] }
 stm32f0 = "0.12.1"
 nb = "1.0"
+embedded-dma = "0.1.2"
 void = { version = "1.0", default-features = false }
 stm32-usbd = { version = "0.5.1", features = ["ram_access_2x16"], optional = true }

diff --git a/src/dma.rs b/src/dma.rs
new file mode 100644
index 0000000..43e6759
--- /dev/null
+++ b/src/dma.rs
@@ -0,0 +1,506 @@
//! # Direct Memory Access
#![allow(dead_code)]

use core::{
    marker::PhantomData,
    sync::atomic::{compiler_fence, Ordering},
};
use embedded_dma::{StaticReadBuffer, StaticWriteBuffer};

use super::rcc::Rcc;

#[derive(Debug)]
#[non_exhaustive]
pub enum Error {
    Overrun,
}

pub enum Event {
    HalfTransfer,
    TransferComplete,
}

#[derive(Clone, Copy, PartialEq)]
pub enum Half {
    First,
    Second,
}

pub struct CircBuffer<BUFFER, PAYLOAD>
where
    BUFFER: 'static,
{
    buffer: &'static mut [BUFFER; 2],
    payload: PAYLOAD,
    readable_half: Half,
}

impl<BUFFER, PAYLOAD> CircBuffer<BUFFER, PAYLOAD>
where
    &'static mut [BUFFER; 2]: StaticWriteBuffer,
    BUFFER: 'static,
{
    pub(crate) fn new(buf: &'static mut [BUFFER; 2], payload: PAYLOAD) -> Self {
        CircBuffer {
            buffer: buf,
            payload,
            readable_half: Half::Second,
        }
    }
}

pub trait DmaExt {
    type Channels;

    fn split(self, rcc: &mut Rcc) -> Self::Channels;
}

pub trait TransferPayload {
    fn start(&mut self);
    fn stop(&mut self);
}

pub struct Transfer<MODE, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    _mode: PhantomData<MODE>,
    buffer: BUFFER,
    payload: PAYLOAD,
}

impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    pub(crate) fn r(buffer: BUFFER, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            payload,
        }
    }
}

impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    pub(crate) fn w(buffer: BUFFER, payload: PAYLOAD) -> Self {
        Transfer {
            _mode: PhantomData,
            buffer,
            payload,
        }
    }
}

impl<MODE, BUFFER, PAYLOAD> Drop for Transfer<MODE, BUFFER, PAYLOAD>
where
    PAYLOAD: TransferPayload,
{
    fn drop(&mut self) {
        self.payload.stop();
        compiler_fence(Ordering::SeqCst);
    }
}

/// Read transfer
pub struct R;

/// Write transfer
pub struct W;
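// Usage sketch (illustrative, not taken from this diff): `MODE` is a type-state
// marker -- a DMA write to a peripheral is a `Transfer<R, _, _>` because the
// engine *reads* from the buffer, and a DMA read into memory is a
// `Transfer<W, _, _>`. The `Drop` impl above stops the channel before a live
// buffer can be reclaimed. A hypothetical TX payload would be consumed like so:
//
//     let transfer = tx.write(buf);    // starts the DMA, returns Transfer<R, ..>
//     // ... other work while the engine drains `buf` ...
//     let (buf, tx) = transfer.wait(); // busy-waits, then hands back resources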
macro_rules! dma {
    ($($DMAX:ident: ($dmaX:ident, $dmaen:ident, {
        $($CX:ident: (
            $chX:ident,
            $htifX:ident,
            $tcifX:ident,
            $chtifX:ident,
            $ctcifX:ident,
            $cgifX:ident
        ),)+
    }),)+) => {
        $(
            pub mod $dmaX {
                use core::{sync::atomic::{self, Ordering}, ptr, mem};

                use crate::pac::{$DMAX, dma1};
                use crate::rcc::Rcc;

                use crate::dma::{CircBuffer, DmaExt, Error, Event, Half, R, RxDma, Transfer, TransferPayload, TxDma, W};

                #[allow(clippy::manual_non_exhaustive)]
                pub struct Channels((), $(pub $CX),+);

                $(
                    /// A singleton that represents a single DMAx channel (channel X in this case)
                    ///
                    /// This singleton has exclusive access to the registers of the DMAx channel X
                    pub struct $CX { _0: () }

                    impl $CX {
                        /// Associated peripheral `address`
                        ///
                        /// `inc` indicates whether the address will be incremented after every byte transfer
                        pub fn set_peripheral_address(&mut self, address: u32, inc: bool) {
                            self.ch().par.write(|w| unsafe { w.pa().bits(address) });
                            self.ch().cr.modify(|_, w| w.pinc().bit(inc));
                        }

                        /// Memory `address` from/to which the data will be read/written
                        ///
                        /// `inc` indicates whether the address will be incremented after every byte transfer
                        pub fn set_memory_address(&mut self, address: u32, inc: bool) {
                            self.ch().mar.write(|w| unsafe { w.ma().bits(address) });
                            self.ch().cr.modify(|_, w| w.minc().bit(inc));
                        }

                        /// Number of bytes to transfer
                        pub fn set_transfer_length(&mut self, len: usize) {
                            self.ch().ndtr.write(|w| w.ndt().bits(cast::u16(len).unwrap()));
                        }

                        /// Starts the DMA transfer
                        pub fn start(&mut self) {
                            self.ch().cr.modify(|_, w| w.en().set_bit());
                        }

                        /// Stops the DMA transfer
                        pub fn stop(&mut self) {
                            self.ifcr().write(|w| w.$cgifX().set_bit());
                            self.ch().cr.modify(|_, w| w.en().clear_bit());
                        }

                        /// Returns `true` if there's a transfer in progress
                        pub fn in_progress(&self) -> bool {
                            self.isr().$tcifX().bit_is_clear()
                        }
                    }

                    impl $CX {
                        pub fn listen(&mut self, event: Event) {
                            match event {
                                Event::HalfTransfer => self.ch().cr.modify(|_, w| w.htie().set_bit()),
                                Event::TransferComplete => {
                                    self.ch().cr.modify(|_, w| w.tcie().set_bit())
                                }
                            }
                        }

                        pub fn unlisten(&mut self, event: Event) {
                            match event {
                                Event::HalfTransfer => {
                                    self.ch().cr.modify(|_, w| w.htie().clear_bit())
                                },
                                Event::TransferComplete => {
                                    self.ch().cr.modify(|_, w| w.tcie().clear_bit())
                                }
                            }
                        }

                        pub fn ch(&mut self) -> &dma1::CH {
                            unsafe { &(*$DMAX::ptr()).$chX }
                        }

                        pub fn isr(&self) -> dma1::isr::R {
                            // NOTE(unsafe) atomic read with no side effects
                            unsafe { (*$DMAX::ptr()).isr.read() }
                        }

                        pub fn ifcr(&self) -> &dma1::IFCR {
                            unsafe { &(*$DMAX::ptr()).ifcr }
                        }

                        pub fn get_ndtr(&self) -> u32 {
                            // NOTE(unsafe) atomic read with no side effects
                            unsafe { &(*$DMAX::ptr()) }.$chX.ndtr.read().bits()
                        }
                    }
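                    // Usage sketch (illustrative, not taken from this diff): the
                    // configuration sequence a `ReadDma`/`WriteDma` implementation
                    // would run before wrapping this channel in a `Transfer`.
                    // `USART1_RDR_ADDR` stands in for a real peripheral data
                    // register address:
                    //
                    //     channel.set_peripheral_address(USART1_RDR_ADDR, false); // fixed register
                    //     channel.set_memory_address(buf.as_ptr() as u32, true);  // walk the buffer
                    //     channel.set_transfer_length(buf.len());
                    //     atomic::compiler_fence(Ordering::Release); // buffer writes happen-before start
                    //     channel.start();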
                    impl<B, PAYLOAD> CircBuffer<B, RxDma<PAYLOAD, $CX>>
                    where
                        RxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        /// Peeks into the readable half of the buffer
                        pub fn peek<R, F>(&mut self, f: F) -> Result<R, Error>
                        where
                            F: FnOnce(&B, Half) -> R,
                        {
                            let half_being_read = self.readable_half()?;

                            let buf = match half_being_read {
                                Half::First => &self.buffer[0],
                                Half::Second => &self.buffer[1],
                            };

                            // XXX does this need a compiler barrier?
                            let ret = f(buf, half_being_read);

                            let isr = self.payload.channel.isr();
                            let first_half_is_done = isr.$htifX().bit_is_set();
                            let second_half_is_done = isr.$tcifX().bit_is_set();

                            if (half_being_read == Half::First && second_half_is_done)
                                || (half_being_read == Half::Second && first_half_is_done)
                            {
                                Err(Error::Overrun)
                            } else {
                                Ok(ret)
                            }
                        }

                        /// Returns the `Half` of the buffer that can be read
                        pub fn readable_half(&mut self) -> Result<Half, Error> {
                            let isr = self.payload.channel.isr();
                            let first_half_is_done = isr.$htifX().bit_is_set();
                            let second_half_is_done = isr.$tcifX().bit_is_set();

                            if first_half_is_done && second_half_is_done {
                                return Err(Error::Overrun);
                            }

                            let last_read_half = self.readable_half;

                            Ok(match last_read_half {
                                Half::First => {
                                    if second_half_is_done {
                                        self.payload.channel.ifcr().write(|w| w.$ctcifX().set_bit());

                                        self.readable_half = Half::Second;
                                        Half::Second
                                    } else {
                                        last_read_half
                                    }
                                }
                                Half::Second => {
                                    if first_half_is_done {
                                        self.payload.channel.ifcr().write(|w| w.$chtifX().set_bit());

                                        self.readable_half = Half::First;
                                        Half::First
                                    } else {
                                        last_read_half
                                    }
                                }
                            })
                        }

                        /// Stops the transfer and returns the underlying buffer and RxDma
                        pub fn stop(mut self) -> (&'static mut [B; 2], RxDma<PAYLOAD, $CX>) {
                            self.payload.stop();

                            (self.buffer, self.payload)
                        }
                    }

                    impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, RxDma<PAYLOAD, $CX>>
                    where
                        RxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        pub fn is_done(&self) -> bool {
                            !self.payload.channel.in_progress()
                        }

                        pub fn wait(mut self) -> (BUFFER, RxDma<PAYLOAD, $CX>) {
                            while !self.is_done() {}

                            atomic::compiler_fence(Ordering::Acquire);

                            self.payload.stop();

                            // we need a read here to make the Acquire fence effective
                            // we do *not* need this if `dma.stop` does a RMW operation
                            unsafe { ptr::read_volatile(&0); }

                            // we need a fence here for the same reason we need one in `Transfer.wait`
                            atomic::compiler_fence(Ordering::Acquire);

                            // `Transfer` needs to have a `Drop` implementation, because we accept
                            // managed buffers that can free their memory on drop. Because of that
                            // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
                            // and `mem::forget`.
                            //
                            // NOTE(unsafe) There is no panic branch between getting the resources
                            // and forgetting `self`.
                            unsafe {
                                let buffer = ptr::read(&self.buffer);
                                let payload = ptr::read(&self.payload);
                                mem::forget(self);
                                (buffer, payload)
                            }
                        }
                    }

                    impl<BUFFER, PAYLOAD> Transfer<R, BUFFER, TxDma<PAYLOAD, $CX>>
                    where
                        TxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        pub fn is_done(&self) -> bool {
                            !self.payload.channel.in_progress()
                        }

                        pub fn wait(mut self) -> (BUFFER, TxDma<PAYLOAD, $CX>) {
                            while !self.is_done() {}

                            atomic::compiler_fence(Ordering::Acquire);

                            self.payload.stop();

                            // we need a read here to make the Acquire fence effective
                            // we do *not* need this if `dma.stop` does a RMW operation
                            unsafe { ptr::read_volatile(&0); }

                            // we need a fence here for the same reason we need one in `Transfer.wait`
                            atomic::compiler_fence(Ordering::Acquire);

                            // `Transfer` needs to have a `Drop` implementation, because we accept
                            // managed buffers that can free their memory on drop. Because of that
                            // we can't move out of the `Transfer`'s fields, so we use `ptr::read`
                            // and `mem::forget`.
                            //
                            // NOTE(unsafe) There is no panic branch between getting the resources
                            // and forgetting `self`.
                            unsafe {
                                let buffer = ptr::read(&self.buffer);
                                let payload = ptr::read(&self.payload);
                                mem::forget(self);
                                (buffer, payload)
                            }
                        }
                    }
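                    // Usage sketch (illustrative, not taken from this diff):
                    // `wait` spins, so a caller that must not block can poll
                    // `is_done` and reclaim the resources only once it is true:
                    //
                    //     if transfer.is_done() {
                    //         let (buf, rx) = transfer.wait(); // returns without spinning
                    //     }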
                    impl<BUFFER, PAYLOAD> Transfer<W, BUFFER, RxDma<PAYLOAD, $CX>>
                    where
                        RxDma<PAYLOAD, $CX>: TransferPayload,
                    {
                        pub fn peek<T>(&self) -> &[T]
                        where
                            BUFFER: AsRef<[T]>,
                        {
                            let pending = self.payload.channel.get_ndtr() as usize;

                            let slice = self.buffer.as_ref();
                            let capacity = slice.len();

                            &slice[..(capacity - pending)]
                        }
                    }
                )+

                impl DmaExt for $DMAX {
                    type Channels = Channels;

                    fn split(self, rcc: &mut Rcc) -> Channels {
                        rcc.regs.ahbenr.modify(|_, w| w.$dmaen().set_bit());

                        // reset the DMA control registers (stops all on-going transfers)
                        $(
                            self.$chX.cr.reset();
                        )+

                        Channels((), $($CX { _0: () }),+)
                    }
                }
            }
        )+
    }
}

dma! {
    DMA1: (dma1, dma1en, {
        C1: (
            ch1,
            htif1, tcif1,
            chtif1, ctcif1, cgif1
        ),
        C2: (
            ch2,
            htif2, tcif2,
            chtif2, ctcif2, cgif2
        ),
        C3: (
            ch3,
            htif3, tcif3,
            chtif3, ctcif3, cgif3
        ),
        C4: (
            ch4,
            htif4, tcif4,
            chtif4, ctcif4, cgif4
        ),
        C5: (
            ch5,
            htif5, tcif5,
            chtif5, ctcif5, cgif5
        ),
        C6: (
            ch6,
            htif6, tcif6,
            chtif6, ctcif6, cgif6
        ),
        C7: (
            ch7,
            htif7, tcif7,
            chtif7, ctcif7, cgif7
        ),
    }),
}

/// DMA Receiver
pub struct RxDma<PAYLOAD, RXCH> {
    pub(crate) payload: PAYLOAD,
    pub channel: RXCH,
}

/// DMA Transmitter
pub struct TxDma<PAYLOAD, TXCH> {
    pub(crate) payload: PAYLOAD,
    pub channel: TXCH,
}

/// DMA Receiver/Transmitter
pub struct RxTxDma<PAYLOAD, RXCH, TXCH> {
    pub(crate) payload: PAYLOAD,
    pub rxchannel: RXCH,
    pub txchannel: TXCH,
}

pub trait Receive {
    type RxChannel;
    type TransmittedWord;
}

pub trait Transmit {
    type TxChannel;
    type ReceivedWord;
}

/// Trait for circular DMA reads from a peripheral into memory.
pub trait CircReadDma<B, RS>: Receive
where
    &'static mut [B; 2]: StaticWriteBuffer<Word = RS>,
    B: 'static,
    Self: core::marker::Sized,
{
    fn circ_read(self, buffer: &'static mut [B; 2]) -> CircBuffer<B, Self>;
}

/// Trait for one-shot DMA reads from a peripheral into memory.
pub trait ReadDma<B, RS>: Receive
where
    B: StaticWriteBuffer<Word = RS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn read(self, buffer: B) -> Transfer<W, B, Self>;
}

/// Trait for one-shot DMA writes from memory to a peripheral.
pub trait WriteDma<B, TS>: Transmit
where
    B: StaticReadBuffer<Word = TS>,
    Self: core::marker::Sized + TransferPayload,
{
    fn write(self, buffer: B) -> Transfer<R, B, Self>;
}

diff --git a/src/lib.rs b/src/lib.rs
index e6fbcb5..9ba485d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -90,6 +90,9 @@ pub mod watchdog;
 ))]
 pub mod can;
 
+#[cfg(feature = "device-selected")]
+pub mod dma;
+
 #[cfg(feature = "device-selected")]
 #[deprecated(since = "0.17.0", note = "please use `pac` instead")]
 pub use pac as stm32;
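Usage sketch (illustrative; this diff adds only the DMA framework, no
peripheral glue). `DmaExt::split` enables the DMA clock and hands out one
singleton per channel; the leading `()` field in `Channels` keeps the tuple
indices aligned with the channel numbers, so `channels.1` is `C1`. With a
hypothetical payload `rx` that implements `ReadDma`, a one-shot read looks
roughly like this:

    let mut dp = pac::Peripherals::take().unwrap();
    let mut rcc = dp.RCC.configure().freeze(&mut dp.FLASH);
    let channels = dp.DMA1.split(&mut rcc);

    // DMA buffers must be 'static; cortex_m::singleton! is one way to get one
    let buf = cortex_m::singleton!(: [u8; 8] = [0; 8]).unwrap();

    // `rx` stands for some peripheral payload bundled with a channel
    let (buf, rx) = rx.read(buf).wait();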