Open Model Railroad Network (OpenMRN)
Loading...
Searching...
No Matches
Esp32HardwareTwai.cxx
Go to the documentation of this file.
1
38// Ensure we only compile this code for the ESP32 family of MCUs.
39#if defined(ESP_PLATFORM)
40
41#include "sdkconfig.h"
42
43#if CONFIG_VFS_SUPPORT_TERMIOS
44// remove defines added by arduino-esp32 core/esp32/binary.h which are
45// duplicated in sys/termios.h which may be included by esp_vfs.h
46#undef B110
47#undef B1000000
48#endif // CONFIG_VFS_SUPPORT_TERMIOS
49
50#include <assert.h>
51#include <driver/gpio.h>
52#include <esp_idf_version.h>
53
54#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5,1,0)
55#include <esp_clk_tree.h>
56#endif
57#include <esp_private/periph_ctrl.h>
58#include <esp_ipc.h>
59#include <esp_log.h>
60#include <esp_rom_gpio.h>
61#include <esp_intr_alloc.h>
62#include <esp_task.h>
63#include <esp_vfs.h>
64#include <fcntl.h>
65#include <hal/twai_types.h>
66#include <hal/twai_hal.h>
67#include <soc/gpio_sig_map.h>
68#include <stdint.h>
69
70#include "can_frame.h"
71#include "can_ioctl.h"
75#include "utils/Atomic.hxx"
76#include "utils/logging.h"
77
78namespace openmrn_arduino
79{
80
/// Fixed file descriptor for the TWAI device; only a single TWAI controller
/// is supported so one constant fd suffices.
static constexpr int TWAI_VFS_FD = 0;

/// Priority for the watchdog/reporting task (just below the TCP/IP task).
static constexpr BaseType_t WATCHDOG_TASK_PRIORITY = ESP_TASK_TCPIP_PRIO - 1;

/// Stack size (bytes) for the watchdog/reporting task.
static constexpr BaseType_t WATCHDOG_TASK_STACK = 2548;

/// Interval between watchdog checks / periodic statistics reports.
static constexpr TickType_t STATUS_PRINT_INTERVAL = pdMS_TO_TICKS(10000);

/// Interrupt sources to enable on the TWAI controller.
/// NOTE(review): 0xE7 presumably selects RX/TX/error/arbitration-lost
/// interrupt bits -- confirm against the TWAI interrupt enable register map.
static constexpr uint32_t TWAI_DEFAULT_INTERRUPTS = 0xE7;

/// Flags used when allocating the TWAI ISR (low/medium priority level).
static constexpr uint32_t TWAI_INTERRUPT_FLAGS = ESP_INTR_FLAG_LOWMED;

/// Tag used for early (ISR-safe) log output.
static constexpr const char *TWAI_LOG_TAG = "ESP-TWAI";
105
107typedef struct
108{
110 esp32_twai_stats_t stats;
111
113 twai_hal_context_t context;
114
116 intr_handle_t isr_handle;
117
120
123
125 Atomic buf_lock;
126
128 Notifiable* readable_notify;
129
131 Notifiable* writable_notify;
132
135 bool non_blocking;
136
137#if CONFIG_VFS_SUPPORT_SELECT
139 Atomic select_lock;
140
144 esp_vfs_select_sem_t select_sem;
145
148 fd_set *readfds;
149
152 fd_set readfds_orig;
153
156 fd_set *writefds;
157
160 fd_set writefds_orig;
161
164 fd_set *exceptfds;
165
168 fd_set exceptfds_orig;
169#endif // CONFIG_VFS_SUPPORT_SELECT
170
173 bool active;
174
177 os_thread_t wd_thread;
178
181 bool report_stats;
182} TwaiDriver;
183
185static TwaiDriver twai;
186
189static inline bool is_twai_running()
190{
191 return twai_hal_check_state_flags(&twai.context, TWAI_HAL_STATE_FLAG_RUNNING);
192}
193
196static inline bool is_twai_recovering()
197{
198 return twai_hal_check_state_flags(&twai.context, TWAI_HAL_STATE_FLAG_RECOVERING);
199}
200
203static inline bool is_twai_err_warn()
204{
205 return twai_hal_check_state_flags(&twai.context, TWAI_HAL_STATE_FLAG_ERR_WARN);
206}
207
210static inline bool is_twai_err_passive()
211{
212 return twai_hal_check_state_flags(&twai.context, TWAI_HAL_STATE_FLAG_ERR_PASSIVE);
213}
214
217static inline bool is_twai_bus_off()
218{
219 return twai_hal_check_state_flags(&twai.context, TWAI_HAL_STATE_FLAG_BUS_OFF);
220}
221
223static inline bool is_twai_tx_occupied()
224{
225 return twai_hal_check_state_flags(&twai.context, TWAI_HAL_STATE_FLAG_TX_BUFF_OCCUPIED);
226}
227
230static inline void twai_purge_rx_queue()
231{
232 Notifiable* n = nullptr;
233 {
234 AtomicHolder h(&twai.buf_lock);
235 LOG(VERBOSE, "ESP-TWAI: purging RX-Q: %zu", twai.rx_buf->pending());
236 twai.stats.rx_missed += twai.rx_buf->pending();
237 twai.rx_buf->flush();
238 std::swap(n, twai.readable_notify);
239 }
240 if (n)
241 {
242 n->notify();
243 }
244#if CONFIG_VFS_SUPPORT_SELECT
245 AtomicHolder l(&twai.select_lock);
246 if (FD_ISSET(TWAI_VFS_FD, &twai.exceptfds_orig))
247 {
248 FD_SET(TWAI_VFS_FD, twai.exceptfds);
249 esp_vfs_select_triggered(twai.select_sem);
250 }
251#endif // CONFIG_VFS_SUPPORT_SELECT
252}
253
256static inline void twai_purge_tx_queue()
257{
258 Notifiable* n = nullptr;
259 {
260 AtomicHolder h(&twai.buf_lock);
261 LOG(VERBOSE, "ESP-TWAI: purging TX-Q: %zu", twai.tx_buf->pending());
262 twai.stats.tx_failed += twai.tx_buf->pending();
263 twai.tx_buf->flush();
264 std::swap(n, twai.writable_notify);
265 }
266 if (n)
267 {
268 n->notify();
269 }
270#if CONFIG_VFS_SUPPORT_SELECT
271 AtomicHolder l(&twai.select_lock);
272 if (FD_ISSET(TWAI_VFS_FD, &twai.exceptfds_orig))
273 {
274 FD_SET(TWAI_VFS_FD, twai.exceptfds);
275 esp_vfs_select_triggered(twai.select_sem);
276 }
277#endif // CONFIG_VFS_SUPPORT_SELECT
278}
279
287static ssize_t twai_vfs_write(int fd, const void *buf, size_t size)
288{
289 LOG(VERBOSE, "ESP-TWAI: write(%d, %p, %zu)", fd, buf, size);
290 DASSERT(fd == TWAI_VFS_FD);
291 ssize_t sent = 0;
292 const struct can_frame *data = (const struct can_frame *)buf;
293 size /= sizeof(struct can_frame);
294 bool bus_error = false;
295 while (size && !bus_error)
296 {
297 if (is_twai_bus_off())
298 {
299 // If the TWAI bus is OFF initiate recovery and purge the pending TX queue.
300 LOG_ERROR("ESP-TWAI: Bus is OFF, initiating recovery.");
301 twai_hal_start_bus_recovery(&twai.context);
302 bus_error = true;
303 break;
304 }
305 else if (!is_twai_running())
306 {
307 LOG_ERROR("ESP-TWAI: TWAI driver is not running, unable to write "
308 "%zu frames.", size);
309 bus_error = true;
310 break;
311 }
312
313 size_t frames_written = 0;
314 {
315 AtomicHolder h(&twai.buf_lock);
316 frames_written = twai.tx_buf->put(data, size < 8 ? size : 8);
317 }
318 if (frames_written == 0)
319 {
320 // No space in the TX queue
321 break;
322 }
323 else
324 {
325 twai.stats.tx_processed += frames_written;
326 }
327
328 if (is_twai_running() && !is_twai_tx_occupied() && frames_written)
329 {
330 // since the TX buffer is not occupied, retrieve the first
331 // frame and transmit it here.
332 AtomicHolder h(&twai.buf_lock);
333 struct can_frame *frame = nullptr;
334 twai_message_t tx_frame;
335 twai_hal_frame_t hal_frame;
336 if (twai.tx_buf->data_read_pointer(&frame) && frame != nullptr)
337 {
338 memset(&tx_frame, 0, sizeof(twai_message_t));
339 tx_frame.identifier = frame->can_id;
340 tx_frame.extd = IS_CAN_FRAME_EFF(*frame);
341 tx_frame.rtr = IS_CAN_FRAME_RTR(*frame);
342 tx_frame.data_length_code = frame->can_dlc;
343 memcpy(tx_frame.data, frame->data, frame->can_dlc);
344 twai_hal_format_frame(&tx_frame, &hal_frame);
345 twai_hal_set_tx_buffer_and_transmit(&twai.context, &hal_frame);
346 }
347 }
348 sent += frames_written;
349 size -= frames_written;
350 }
351
352 if (bus_error)
353 {
354 twai_purge_tx_queue();
355 }
356
357 if (!sent)
358 {
359 errno = EWOULDBLOCK;
360 }
361 LOG(VERBOSE, "ESP-TWAI: write() %zu", sent * sizeof(struct can_frame));
362 return sent * sizeof(struct can_frame);
363}
364
372static ssize_t twai_vfs_read(int fd, void *buf, size_t size)
373{
374 LOG(VERBOSE, "ESP-TWAI: read(%d, %p, %zu)", fd, buf, size);
375 DASSERT(fd == TWAI_VFS_FD);
376
377 ssize_t received = 0;
378 struct can_frame *data = (struct can_frame *)buf;
379 size /= sizeof(struct can_frame);
380 while (size)
381 {
382 size_t received_frames = 0;
383 {
384 AtomicHolder h(&twai.buf_lock);
385 received_frames = twai.rx_buf->get(data, size < 8 ? size : 8);
386 }
387 if (received_frames == 0)
388 {
389 break;
390 }
391 twai.stats.rx_processed += received_frames;
392 size -= received_frames;
393 received += received_frames;
394 data += received_frames;
395 }
396 if (!received)
397 {
398 errno = EWOULDBLOCK;
399 return -1;
400 }
401
402 LOG(VERBOSE, "ESP-TWAI: read() %zu", received * sizeof(struct can_frame));
403 return received * sizeof(struct can_frame);
404}
405
/// VFS adapter for open(path, flags, mode).
///
/// @param path path being opened (leading '/' still present).
/// @param flags open flags; only O_NONBLOCK is honored here.
/// @param mode unused, logged only.
///
/// @return the constant TWAI_VFS_FD (only one TWAI device exists).
static int twai_vfs_open(const char *path, int flags, int mode)
{
    // skip past the '/' that is passed in as first character
    path++;
    twai.non_blocking = (flags & O_NONBLOCK);

    LOG(INFO, "ESP-TWAI: Starting TWAI driver on:%s mode:%x (%s) fd:%d",
        path, mode, twai.non_blocking ? "non-blocking" : "blocking",
        TWAI_VFS_FD);
    // Drop any stale frames from a previous session before (re)starting the
    // controller.
    twai_purge_rx_queue();
    twai_purge_tx_queue();
    twai_hal_start(&twai.context, TWAI_MODE_NORMAL);
    return TWAI_VFS_FD;
}
430
/// VFS adapter for close(fd).
///
/// @param fd file descriptor being closed (logged only).
///
/// @return 0 always.
static int twai_vfs_close(int fd)
{
    LOG(INFO, "ESP-TWAI: Disabling TWAI driver using fd:%d", fd);
    // Discard any queued frames and wake blocked callers before stopping
    // the controller.
    twai_purge_rx_queue();
    twai_purge_tx_queue();
    twai_hal_stop(&twai.context);
    return 0;
}
447
455static int twai_vfs_ioctl(int fd, int cmd, va_list args)
456{
457 /* sanity check to be sure we have a valid key for this device */
459
460 // Will be called at the end if non-null.
461 Notifiable* n = nullptr;
462
463 if (IOC_SIZE(cmd) == NOTIFIABLE_TYPE)
464 {
465 n = reinterpret_cast<Notifiable*>(va_arg(args, uintptr_t));
466 }
467
468 switch (cmd)
469 {
470 default:
471 return -EINVAL;
473 {
474 AtomicHolder h(&twai.buf_lock);
475 if (!twai.rx_buf->pending())
476 {
477 std::swap(n, twai.readable_notify);
478 }
479 }
480 break;
482 {
483 AtomicHolder h(&twai.buf_lock);
484 if (!twai.tx_buf->space())
485 {
486 std::swap(n, twai.writable_notify);
487 }
488 }
489 break;
490 }
491 if (n)
492 {
493 n->notify();
494 }
495 return 0;
496}
497
507static int twai_vfs_fcntl(int fd, int cmd, int arg)
508{
509 HASSERT(fd == TWAI_VFS_FD);
510 int result = 0;
511
512 if (cmd == F_GETFL)
513 {
514 if (twai.non_blocking)
515 {
516 result |= O_NONBLOCK;
517 }
518 }
519 else if (cmd == F_SETFL)
520 {
521 twai.non_blocking = arg & O_NONBLOCK;
522 }
523 else
524 {
525 errno = ENOSYS;
526 result = -1;
527 }
528
529 return result;
530}
531
532#if CONFIG_VFS_SUPPORT_SELECT
/// VFS adapter invoked when a select() call starts that may include the
/// TWAI fd. Caches the caller's fd_sets and wake semaphore so the ISR and
/// purge paths can signal readiness.
///
/// @param nfds highest fd in the sets plus one (unused here).
/// @param readfds/writefds/exceptfds caller fd_sets; zeroed on entry into
/// the select and re-populated when events fire.
/// @param sem semaphore used to wake the pending select().
/// @param end_select_args unused.
///
/// @return ESP_OK always.
///
/// NOTE(review): only pending RX data triggers the early exit below; TX
/// space is not checked here even when writefds includes the TWAI fd --
/// confirm this is intentional (a write-ready wake then only happens from
/// the ISR path).
static esp_err_t twai_vfs_start_select(int nfds, fd_set *readfds,
                                       fd_set *writefds, fd_set *exceptfds,
                                       esp_vfs_select_sem_t sem,
                                       void **end_select_args)
{
    AtomicHolder l(&twai.select_lock);
    // zero the cached copy of the fd_sets before setting the incoming copy in
    // case the TWAI VFS FD is not set so we do not raise the alert when there
    // is an interesting event.
    FD_ZERO(&twai.readfds_orig);
    FD_ZERO(&twai.writefds_orig);
    FD_ZERO(&twai.exceptfds_orig);

    // If the TWAI FD is present in any of the FD sets we should process the
    // select call.
    if (FD_ISSET(TWAI_VFS_FD, readfds) || FD_ISSET(TWAI_VFS_FD, writefds) ||
        FD_ISSET(TWAI_VFS_FD, exceptfds))
    {
        twai.select_sem = sem;
        twai.readfds = readfds;
        twai.readfds_orig = *readfds;
        twai.writefds = writefds;
        twai.writefds_orig = *writefds;
        twai.exceptfds = exceptfds;
        twai.exceptfds_orig = *exceptfds;

        // zero the fd_sets so we can mark the correct signals when we trigger
        // the VFS layer.
        FD_ZERO(readfds);
        FD_ZERO(writefds);
        FD_ZERO(exceptfds);

        // Check if we have pending frames to RX, if so trigger an early exit
        // from select()
        if (FD_ISSET(TWAI_VFS_FD, &twai.readfds_orig))
        {
            AtomicHolder h(&twai.buf_lock);
            if (twai.rx_buf->pending())
            {
                FD_SET(TWAI_VFS_FD, readfds);
                esp_vfs_select_triggered(sem);
            }
        }
    }
    return ESP_OK;
}
586
590static esp_err_t twai_vfs_end_select(void *end_select_args)
591{
592 AtomicHolder l(&twai.select_lock);
593 // zero the cached copy of the fd_sets to prevent triggering the VFS wakeup
594 // since the select() has ended.
595 FD_ZERO(&twai.readfds_orig);
596 FD_ZERO(&twai.writefds_orig);
597 FD_ZERO(&twai.exceptfds_orig);
598 return ESP_OK;
599}
600
601#endif // CONFIG_VFS_SUPPORT_SELECT
602
/// Drains all frames pending in the hardware RX FIFO into the software RX
/// queue. Runs in ISR context (called from twai_isr).
///
/// @return number of frames successfully queued into the software buffer.
static inline uint32_t twai_rx_frames()
{
    AtomicHolder h(&twai.buf_lock);
    uint32_t rx_ready_count = twai_hal_get_rx_msg_count(&twai.context);
    struct can_frame *can_frame = nullptr;
    uint32_t rx_count = 0;
    ESP_EARLY_LOGV(TWAI_LOG_TAG, "rx-ready-count: %" PRIu32, rx_ready_count);
    for (uint32_t idx = 0; idx < rx_ready_count; idx++)
    {
        twai_hal_frame_t frame;
        if (twai_hal_read_rx_buffer_and_clear(&twai.context, &frame))
        {
            if (frame.dlc > TWAI_FRAME_MAX_DLC)
            {
                // DLC is longer than supported, discard the frame.
                twai.stats.rx_discard++;
                ESP_EARLY_LOGE(TWAI_LOG_TAG, "rx-discard:%" PRIu32,
                    twai.stats.rx_discard);
            }
            else if (twai.rx_buf->data_write_pointer(&can_frame))
            {
                // Convert the HAL frame into a can_frame directly in the
                // software buffer's write slot.
                twai_message_t rx_frame;
                twai_hal_parse_frame(&frame, &rx_frame);
                memcpy(can_frame->data, rx_frame.data, TWAI_FRAME_MAX_DLC);
                can_frame->can_dlc = rx_frame.data_length_code;
                can_frame->can_id = rx_frame.identifier;
                if (rx_frame.extd)
                {
                    SET_CAN_FRAME_EFF(*can_frame);
                }
                else
                {
                    CLR_CAN_FRAME_EFF(*can_frame);
                }
                if (rx_frame.rtr)
                {
                    SET_CAN_FRAME_RTR(*can_frame);
                }
                else
                {
                    CLR_CAN_FRAME_RTR(*can_frame);
                }
                // commit the slot; advance() returns the number of slots
                // actually advanced.
                rx_count += twai.rx_buf->advance(1);
                ESP_EARLY_LOGV(TWAI_LOG_TAG, "rx-OK");
            }
            else
            {
                // Software RX buffer is full; account the frame as missed.
                twai.stats.rx_missed++;
                ESP_EARLY_LOGV(TWAI_LOG_TAG, "rx-missed:%" PRIu32,
                    twai.stats.rx_missed);
            }
        }
        else
        {
            ESP_EARLY_LOGV(TWAI_LOG_TAG, "rx-overrun");
// If the SOC does not support automatic clearing of the RX FIFO we need to
// handle it here and break out of the loop.
#ifndef SOC_TWAI_SUPPORTS_RX_STATUS
            twai.stats.rx_overrun +=
                twai_hal_clear_rx_fifo_overrun(&twai.context);
            break;
#else
            twai.stats.rx_overrun++;
#endif // SOC_TWAI_SUPPORTS_RX_STATUS
        }
    }

    return rx_count;
}
673
/// Records the outcome of the last transmission and, when another frame is
/// queued, hands it to the hardware TX buffer. Runs in ISR context.
///
/// @return 1 when a new frame was handed to the hardware, 0 otherwise.
static inline uint32_t twai_tx_frame()
{
    AtomicHolder h(&twai.buf_lock);
    if (twai_hal_check_last_tx_successful(&twai.context))
    {
        ESP_EARLY_LOGV(TWAI_LOG_TAG, "TX-OK");
        twai.stats.tx_success++;
        // drop the frame that was just transmitted from the queue.
        twai.tx_buf->consume(1);
    }
    else
    {
        // NOTE(review): the failed frame is not consumed here, so it remains
        // at the head of the queue and is retransmitted below -- confirm
        // this retry-forever behavior is intended.
        ESP_EARLY_LOGV(TWAI_LOG_TAG, "TX-FAIL");
        twai.stats.tx_failed++;
    }

    // Check if we have a pending frame to transmit in the queue
    struct can_frame *can_frame = nullptr;
    if (twai.tx_buf->data_read_pointer(&can_frame) && can_frame != nullptr)
    {
        twai_message_t tx_frame;
        twai_hal_frame_t hal_frame;
        memset(&tx_frame, 0, sizeof(twai_message_t));
        tx_frame.identifier = can_frame->can_id;
        tx_frame.extd = IS_CAN_FRAME_EFF(*can_frame);
        tx_frame.rtr = IS_CAN_FRAME_RTR(*can_frame);
        tx_frame.data_length_code = can_frame->can_dlc;
        memcpy(tx_frame.data, can_frame->data, can_frame->can_dlc);
        twai_hal_format_frame(&tx_frame, &hal_frame);
        twai_hal_set_tx_buffer_and_transmit(&twai.context, &hal_frame);
        return 1;
    }
    return 0;
}
708
/// TWAI interrupt service routine: dispatches on the HAL event bitmask to
/// handle peripheral reset errata, RX/TX completion, bus recovery and error
/// accounting, waking select()/Notifiable waiters as needed.
///
/// @param arg unused.
static void twai_isr(void *arg)
{
    BaseType_t wakeup = pdFALSE;
    uint32_t events = twai_hal_get_events(&twai.context);
    ESP_EARLY_LOGV(TWAI_LOG_TAG, "events: %04" PRIx32, events);

#if defined(CONFIG_TWAI_ERRATA_FIX_RX_FRAME_INVALID) || \
    defined(CONFIG_TWAI_ERRATA_FIX_RX_FIFO_CORRUPT)
    // Silicon errata workaround: the controller must be reset to recover
    // from certain RX conditions; frames lost in the reset are accounted.
    if (events & TWAI_HAL_EVENT_NEED_PERIPH_RESET)
    {
        ESP_EARLY_LOGV(TWAI_LOG_TAG, "periph-reset");
        twai_hal_prepare_for_reset(&twai.context);
        periph_module_reset(PERIPH_TWAI_MODULE);
        twai_hal_recover_from_reset(&twai.context);
        twai.stats.rx_lost += twai_hal_get_reset_lost_rx_cnt(&twai.context);
#if CONFIG_VFS_SUPPORT_SELECT
        // Report the reset as an exceptional condition to select() waiters.
        AtomicHolder l(&twai.select_lock);
        if (FD_ISSET(TWAI_VFS_FD, &twai.exceptfds_orig))
        {
            FD_SET(TWAI_VFS_FD, twai.exceptfds);
            esp_vfs_select_triggered_isr(twai.select_sem, &wakeup);
        }
#endif // CONFIG_VFS_SUPPORT_SELECT
    }
#endif // TWAI_ERRATA_FIX_RX_FRAME_INVALID || TWAI_ERRATA_FIX_RX_FIFO_CORRUPT

    // RX completed
    if ((events & TWAI_HAL_EVENT_RX_BUFF_FRAME) && twai_rx_frames())
    {
#if CONFIG_VFS_SUPPORT_SELECT
        AtomicHolder l(&twai.select_lock);
        if (FD_ISSET(TWAI_VFS_FD, &twai.readfds_orig))
        {
            FD_SET(TWAI_VFS_FD, twai.readfds);
            esp_vfs_select_triggered_isr(twai.select_sem, &wakeup);
        }
#endif // CONFIG_VFS_SUPPORT_SELECT
        // std::swap is not ISR safe so it is not used here.
        if (twai.readable_notify)
        {
            twai.readable_notify->notify_from_isr();
            twai.readable_notify = nullptr;
        }
    }

    // TX completed
    if ((events & TWAI_HAL_EVENT_TX_BUFF_FREE) && twai_tx_frame())
    {
#if CONFIG_VFS_SUPPORT_SELECT
        AtomicHolder l(&twai.select_lock);
        if (FD_ISSET(TWAI_VFS_FD, &twai.writefds_orig))
        {
            FD_SET(TWAI_VFS_FD, twai.writefds);
            esp_vfs_select_triggered_isr(twai.select_sem, &wakeup);
        }
#endif // CONFIG_VFS_SUPPORT_SELECT
        // std::swap is not ISR safe so it is not used here.
        if (twai.writable_notify)
        {
            twai.writable_notify->notify_from_isr();
            twai.writable_notify = nullptr;
        }
    }

    // Bus recovery complete, trigger a restart
    if (events & TWAI_HAL_EVENT_BUS_RECOV_CPLT)
    {
        ESP_EARLY_LOGV(TWAI_LOG_TAG, "bus recovery complete");
        // start the driver automatically
        twai_hal_start(&twai.context, TWAI_MODE_NORMAL);
    }

    // Bus error detected
    if (events & TWAI_HAL_EVENT_BUS_ERR)
    {
        twai.stats.bus_error++;
        ESP_EARLY_LOGV(TWAI_LOG_TAG, "bus-error:%" PRIu32,
            twai.stats.bus_error);
    }

    // Arbitration error detected
    if (events & TWAI_HAL_EVENT_ARB_LOST)
    {
        twai.stats.arb_loss++;
        ESP_EARLY_LOGV(TWAI_LOG_TAG, "arb-lost:%" PRIu32,
            twai.stats.arb_loss);
    }

    // Request a context switch on exit if a higher priority task was woken.
    if (wakeup == pdTRUE)
    {
        portYIELD_FROM_ISR();
    }
}
805
/// Watchdog and statistics reporting task. Periodically checks for a stuck
/// bus-recovery state and for RX/TX queues that have stopped draining,
/// purging them when stuck, and optionally logs driver statistics.
///
/// @param param unused (the Esp32HardwareTwai instance pointer).
///
/// @return NULL always; exits once twai.active is cleared.
void* twai_watchdog(void* param)
{
    LOG(INFO, "ESP-TWAI: Starting TWAI watchdog and reporting task");
    size_t last_rx_pending = 0;
    size_t last_tx_pending = 0;
    uint32_t last_twai_state = 0;

    while (twai.active)
    {
        // delay until the next reporting interval, this is being used instead
        // of vTaskDelay to allow early wake up in the case of shutdown of the
        // TWAI driver.
        ulTaskNotifyTake(pdTRUE, STATUS_PRINT_INTERVAL);

        // If we wake up and the TWAI driver is no longer active we should exit
        // this loop for shutdown.
        if (!twai.active)
        {
            break;
        }

        // If the last status of the bus and current status are the same and it
        // is in a recovery state, retrigger the recovery as it will remain
        // stuck indefinitely without a retrigger.
        if (last_twai_state == twai.context.state_flags &&
            is_twai_recovering())
        {
            LOG(WARNING,
                "ESP-TWAI: Bus appears to be stuck, initiating bus recovery.");
            twai_hal_start_bus_recovery(&twai.context);
        }
        last_twai_state = twai.context.state_flags;

        // If the RX queue has not changed since our last check, purge the RX
        // queue and track it as missed frames.
        if (last_rx_pending && last_rx_pending == twai.rx_buf->pending())
        {
            LOG_ERROR("ESP-TWAI: RX-Q appears stuck, purging RX-Q!");
            twai_purge_rx_queue();
        }
        last_rx_pending = twai.rx_buf->pending();

        // If the TX queue has not changed since our last check, purge the RX
        // queue and track it as failed frames.
        if (last_tx_pending && last_tx_pending == twai.tx_buf->pending())
        {
            LOG_ERROR("ESP-TWAI: TX-Q appears stuck, purging TX-Q!");
            twai_purge_tx_queue();
        }
        last_tx_pending = twai.tx_buf->pending();

        if (twai.report_stats)
        {
            LOG(INFO,
                "ESP-TWAI: "
                "RX:%" PRIu32 " (pending:%zu,overrun:%" PRIu32
                ",discard:%" PRIu32 ",missed:%" PRIu32 ",lost:%" PRIu32 ") "
                "TX:%" PRIu32 " (pending:%zu,suc:%" PRIu32
                ",fail:%" PRIu32 ") "
                "Bus (arb-err:%" PRIu32 ",err:%" PRIu32 ",state:%s)",
                twai.stats.rx_processed, twai.rx_buf->pending(),
                twai.stats.rx_overrun, twai.stats.rx_discard,
                twai.stats.rx_missed, twai.stats.rx_lost,
                twai.stats.tx_processed, twai.tx_buf->pending(),
                twai.stats.tx_success, twai.stats.tx_failed,
                twai.stats.arb_loss, twai.stats.bus_error,
                is_twai_running() ? "Running" :
                is_twai_recovering() ? "Recovering" :
                is_twai_err_warn() ? "Err-Warn" :
                is_twai_err_passive() ? "Err-Pasv" :
                "Bus Off");
        }
    }
    LOG(VERBOSE, "ESP-TWAI: Stopping TWAI watchdog and reporting task");

    return NULL;
}
900
902 int rx, int tx, bool report, size_t rx_size, size_t tx_size,
903 const char *path, int clock_out, int bus_status, uint32_t isr_core)
904 : rxPin_(rx), txPin_(tx), extClockPin_(clock_out),
905 busStatusPin_(bus_status), preferredIsrCore_(isr_core), vfsPath_(path)
906{
907 HASSERT(GPIO_IS_VALID_GPIO(rxPin_));
908 HASSERT(GPIO_IS_VALID_OUTPUT_GPIO(txPin_));
909
910 if (extClockPin_ != GPIO_NUM_NC)
911 {
912 HASSERT(GPIO_IS_VALID_OUTPUT_GPIO(extClockPin_));
913 }
914
915 if (busStatusPin_ != GPIO_NUM_NC)
916 {
917 HASSERT(GPIO_IS_VALID_OUTPUT_GPIO(busStatusPin_));
918 }
919
920 memset(&twai.stats, 0, sizeof(esp32_twai_stats_t));
921
922 twai.rx_buf = DeviceBuffer<struct can_frame>::create(rx_size);
923 HASSERT(twai.rx_buf != nullptr);
924
925 twai.tx_buf =
926 DeviceBuffer<struct can_frame>::create(tx_size, tx_size / 2);
927 HASSERT(twai.tx_buf != nullptr);
928
929 twai.report_stats = report;
930}
931
/// Destructor: releases the ISR and HAL context (when initialized),
/// unregisters the VFS entry, frees the software buffers and wakes the
/// watchdog task so it can observe active == false and exit.
///
/// NOTE(review): the buffers are destroyed *before* the watchdog task is
/// woken; if the watchdog is mid-iteration it may still dereference
/// rx_buf/tx_buf -- confirm the intended shutdown ordering.
Esp32HardwareTwai::~Esp32HardwareTwai()
{
    if (twai.active)
    {
        esp_intr_free(twai.isr_handle);
        twai_hal_deinit(&twai.context);
    }
    twai.active = false;

    esp_vfs_unregister(vfsPath_);

    twai.tx_buf->destroy();
    twai.rx_buf->destroy();

    if (twai.wd_thread)
    {
        // early wake-up so the watchdog loop re-checks twai.active.
        xTaskNotifyGive(twai.wd_thread);
    }
}
951
/// Allocates the TWAI ISR on the calling CPU core (invoked directly or via
/// esp_ipc_call_blocking to pin the ISR to the preferred core).
///
/// @param param unused.
static void esp32_twai_isr_init(void *param)
{
    LOG(VERBOSE, "ESP-TWAI: Allocating ISR");
    ESP_ERROR_CHECK(
        esp_intr_alloc(ETS_TWAI_INTR_SOURCE, TWAI_INTERRUPT_FLAGS, twai_isr,
            nullptr, &twai.isr_handle));
}
962
963void Esp32HardwareTwai::hw_init()
964{
965 LOG(INFO,
966 "ESP-TWAI: Configuring TWAI (TX:%d, RX:%d, EXT-CLK:%d, BUS-CTRL:%d)",
967 txPin_, rxPin_, extClockPin_, busStatusPin_);
968 gpio_set_pull_mode((gpio_num_t)txPin_, GPIO_FLOATING);
969 esp_rom_gpio_connect_out_signal(txPin_, TWAI_TX_IDX, false, false);
970 esp_rom_gpio_pad_select_gpio(txPin_);
971
972 gpio_set_pull_mode((gpio_num_t)rxPin_, GPIO_FLOATING);
973 esp_rom_gpio_connect_in_signal(rxPin_, TWAI_RX_IDX, false);
974 esp_rom_gpio_pad_select_gpio(rxPin_);
975 gpio_set_direction((gpio_num_t)rxPin_, GPIO_MODE_INPUT);
976
977 if (extClockPin_ != GPIO_NUM_NC)
978 {
979 gpio_set_pull_mode((gpio_num_t)extClockPin_, GPIO_FLOATING);
980 esp_rom_gpio_connect_out_signal(extClockPin_, TWAI_CLKOUT_IDX, false,
981 false);
982 esp_rom_gpio_pad_select_gpio((gpio_num_t)extClockPin_);
983 }
984
985 if (busStatusPin_ != GPIO_NUM_NC)
986 {
987 gpio_set_pull_mode((gpio_num_t)busStatusPin_, GPIO_FLOATING);
988 esp_rom_gpio_connect_out_signal(extClockPin_, TWAI_BUS_OFF_ON_IDX,
989 false, false);
990 esp_rom_gpio_pad_select_gpio((gpio_num_t)busStatusPin_);
991 }
992
993 esp_vfs_t vfs = {};
994 vfs.write = twai_vfs_write;
995 vfs.read = twai_vfs_read;
996 vfs.open = twai_vfs_open;
997 vfs.close = twai_vfs_close;
998 vfs.fcntl = twai_vfs_fcntl;
999 vfs.ioctl = twai_vfs_ioctl;
1000#if CONFIG_VFS_SUPPORT_SELECT
1001 vfs.start_select = twai_vfs_start_select;
1002 vfs.end_select = twai_vfs_end_select;
1003#endif // CONFIG_VFS_SUPPORT_SELECT
1004 vfs.flags = ESP_VFS_FLAG_DEFAULT;
1005 ESP_ERROR_CHECK(esp_vfs_register(vfsPath_, &vfs, this));
1006
1007 periph_module_reset(PERIPH_TWAI_MODULE);
1008 periph_module_enable(PERIPH_TWAI_MODULE);
1009
1010 twai_timing_config_t timingCfg = TWAI_TIMING_CONFIG_125KBITS();
1011 twai_filter_config_t filterCfg = TWAI_FILTER_CONFIG_ACCEPT_ALL();
1012
1013#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5,1,0)
1014 // default clock source if not specified in config.
1015 if (timingCfg.clk_src == 0)
1016 {
1017 timingCfg.clk_src = TWAI_CLK_SRC_DEFAULT;
1018 }
1019 twai_hal_config_t twai_hal_cfg =
1020 {
1021 .controller_id = 0,
1022 .clock_source_hz = 0,
1023 };
1024
1025 // retrieve the clock frequency from the SoC
1026 esp_clk_tree_src_get_freq_hz((soc_module_clk_t)timingCfg.clk_src,
1027 ESP_CLK_TREE_SRC_FREQ_PRECISION_CACHED, &twai_hal_cfg.clock_source_hz);
1028
1029 // BRP validations
1030 uint32_t brp = timingCfg.brp;
1031 if (timingCfg.quanta_resolution_hz)
1032 {
1033 HASSERT(twai_hal_cfg.clock_source_hz % timingCfg.quanta_resolution_hz == 0);
1034 brp = twai_hal_cfg.clock_source_hz / timingCfg.quanta_resolution_hz;
1035 }
1036 HASSERT(twai_ll_check_brp_validation(brp));
1037
1038 // Initialize the low level HAL APIs
1039 HASSERT(twai_hal_init(&twai.context, &twai_hal_cfg));
1040#else
1041 // Initialize the low level HAL APIs
1042 HASSERT(twai_hal_init(&twai.context));
1043#endif // IDF v5.1+
1044
1045 LOG(VERBOSE, "ESP-TWAI: Initiailizing peripheral");
1046 twai_hal_configure(&twai.context, &timingCfg, &filterCfg,
1047 TWAI_DEFAULT_INTERRUPTS, 0);
1048
1049#if SOC_CPU_CORES_NUM > 1
1050 ESP_ERROR_CHECK(
1051 esp_ipc_call_blocking(preferredIsrCore_, esp32_twai_isr_init, nullptr));
1052#else
1053 esp32_twai_isr_init(nullptr);
1054#endif // SOC_CPU_CORES_NUM > 1
1055 twai.active = true;
1056
1057 os_thread_create(&twai.wd_thread, "TWAI-WD", WATCHDOG_TASK_PRIORITY,
1058 WATCHDOG_TASK_STACK, twai_watchdog, this);
1059}
1060
1061void Esp32HardwareTwai::get_driver_stats(esp32_twai_stats_t *stats)
1062{
1063 HASSERT(stats != nullptr);
1064 memcpy(stats, &twai.stats, sizeof(esp32_twai_stats_t));
1065}
1066
1067} // namespace openmrn_arduino
1068
1069#endif // ESP_PLATFORM
OSSem sem[1]
One semaphore required per instance pointer.
Definition CC32xxSPI.cxx:55
static OSEvent wakeup
event used to wakeup select calls
Definition Select.cxx:40
See OSMutexLock in os/OS.hxx.
Definition Atomic.hxx:153
Lightweight locking class for protecting small critical sections.
Definition Atomic.hxx:130
Implements a smart buffer specifically designed for character device drivers.
static DeviceBuffer * create(size_t size, size_t level=0)
Create a DeviceBuffer instance.
An object that can schedule itself on an executor to run.
virtual void notify()=0
Generic callback.
Esp32HardwareTwai()
Default constructor.
#define CAN_IOC_READ_ACTIVE
read active ioctl.
#define NOTIFIABLE_TYPE
ioctl minor type used for the read/write active notifiable integration.
#define CAN_IOC_WRITE_ACTIVE
write active ioctl.
#define CAN_IOC_MAGIC
Magic number for this driver's ioctl calls.
#define IOC_SIZE(_num)
Decode ioctl size.
#define IOC_TYPE(_num)
Decode ioctl type.
#define LOG(level, message...)
Conditionally write a message to the logging output.
Definition logging.h:99
static const int VERBOSE
Loglevel that is usually not printed, reporting debugging information.
Definition logging.h:59
static const int WARNING
Loglevel that is always printed, reporting a warning or a retryable error.
Definition logging.h:55
static const int INFO
Loglevel that is printed by default, reporting some status information.
Definition logging.h:57
#define LOG_ERROR(message...)
Shorthand for LOG(LEVEL_ERROR, message...). See LOG.
Definition logging.h:124
#define HASSERT(x)
Checks that the value of expression x is true, else terminates the current process.
Definition macros.h:138
#define DASSERT(x)
Debug assertion facility.
Definition macros.h:159
int os_thread_create(os_thread_t *thread, const char *name, int priority, size_t stack_size, void *(*start_routine)(void *), void *arg)
Create a thread.
Definition os.c:450