Files
micropython/ports/rp2/mpthreadport.c
Damien George dc2a4e3cbd rp2/mpthreadport: Fix race with IRQ when entering atomic section.
Prior to this commit there is a potential deadlock in
mp_thread_begin_atomic_section(), when obtaining the atomic_mutex, in the
following situation:
- main thread calls mp_thread_begin_atomic_section() (for whatever reason,
  doesn't matter)
- the second core is running so the main thread grabs the mutex via the
  call mp_thread_mutex_lock(&atomic_mutex, 1), and this succeeds
- before the main thread has a chance to run save_and_disable_interrupts()
  a USB IRQ comes in and the main thread jumps off to process this IRQ
- that USB processing triggers a call to the dcd_event_handler() wrapper
  from commit bcbdee2357
- that then calls mp_sched_schedule_node()
- that then attempts to obtain the atomic section, calling
  mp_thread_begin_atomic_section()
- that call then blocks trying to obtain atomic_mutex
- core0 is now deadlocked on itself, because the main thread has the mutex
  but the IRQ handler (which preempted the main thread) is blocked waiting
  for the mutex, which will never be free

The solution in this commit is to use mutex enter/exit functions that also
atomically disable/restore interrupts.

Fixes issues #12980 and #13288.

Signed-off-by: Damien George <damien@micropython.org>
2024-01-03 15:59:11 +11:00

162 lines
4.8 KiB
C

/*
* This file is part of the MicroPython project, http://micropython.org/
*
* The MIT License (MIT)
*
* Copyright (c) 2020-2021 Damien P. George
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "py/runtime.h"
#include "py/gc.h"
#include "py/mphal.h"
#include "py/mpthread.h"
#include "pico/stdlib.h"
#include "pico/multicore.h"
#include "mutex_extra.h"
#if MICROPY_PY_THREAD
// Linker-provided symbols marking the bounds of core0's stack region.
extern uint8_t __StackTop, __StackBottom;

// Per-core MicroPython thread state pointers (one slot per core;
// presumably indexed by core number via mp_thread_set_state — defined elsewhere).
void *core_state[2];

// This will be non-NULL while Python code is executing.
STATIC void *(*core1_entry)(void *) = NULL;
// Argument passed through to core1_entry when core1 is launched.
STATIC void *core1_arg = NULL;
// GC-heap-allocated stack for core1, and its length in 32-bit words.
STATIC uint32_t *core1_stack = NULL;
STATIC size_t core1_stack_num_words = 0;

// Thread mutex.  Used by the atomic-section functions below to provide
// mutual exclusion between the two cores.
STATIC mutex_t atomic_mutex;
uint32_t mp_thread_begin_atomic_section(void) {
if (core1_entry) {
// When both cores are executing, we also need to provide
// full mutual exclusion.
return mutex_enter_blocking_and_disable_interrupts(&atomic_mutex);
} else {
return save_and_disable_interrupts();
}
}
// Exit the MicroPython atomic section, restoring the IRQ state returned by
// the matching mp_thread_begin_atomic_section() call.
void mp_thread_end_atomic_section(uint32_t state) {
    // If the mutex is currently owned, the matching "begin" took the
    // two-core path and acquired atomic_mutex, so release it and restore
    // IRQs in one atomic operation (mirroring the acquire side).
    if (atomic_mutex.owner != LOCK_INVALID_OWNER_ID) {
        mutex_exit_and_restore_interrupts(&atomic_mutex, state);
    } else {
        // Single-core path: only the IRQ state was saved.
        restore_interrupts(state);
    }
}
// Initialise threading support.  Must be called on core0 before any other
// threading function.
void mp_thread_init(void) {
    assert(get_core_num() == 0);
    mutex_init(&atomic_mutex);

    // Allow MICROPY_BEGIN_ATOMIC_SECTION to be invoked from core1.
    multicore_lockout_victim_init();

    // Bind core0's MicroPython thread state.
    mp_thread_set_state(&mp_state_ctx.thread);

    // No Python code is running on core1 yet.
    core1_entry = NULL;
}
// Shutdown threading support -- stops the second thread.  Must be called on
// core0.
void mp_thread_deinit(void) {
    assert(get_core_num() == 0);

    // Must ensure that core1 is not currently holding the GC lock, otherwise
    // it will be terminated while holding the lock.  Taking the lock here
    // blocks until core1 has released it, after which it is safe to reset.
    mp_thread_mutex_lock(&MP_STATE_MEM(gc_mutex), 1);
    multicore_reset_core1();
    core1_entry = NULL;
    mp_thread_mutex_unlock(&MP_STATE_MEM(gc_mutex));
}
// Trace roots belonging to the "other" thread during a GC pass.
void mp_thread_gc_others(void) {
    if (core1_entry != NULL) {
        // Collect core1's stack if it is active.  Passing the addresses of
        // the pointer variables keeps the stack allocation and the entry
        // argument reachable (and traces what they point to).
        gc_collect_root((void **)&core1_stack, 1);
        gc_collect_root((void **)&core1_arg, 1);
    }
    if (get_core_num() == 1) {
        // GC running on core1, trace core0's stack.  The pointer difference
        // is in bytes (uint8_t symbols), so divide by the word size to get
        // the number of machine words to scan.
        gc_collect_root((void **)&__StackBottom, (&__StackTop - &__StackBottom) / sizeof(uintptr_t));
    }
}
// Entry point executed on core1; wraps the user-supplied thread entry.
STATIC void core1_entry_wrapper(void) {
    // Allow MICROPY_BEGIN_ATOMIC_SECTION to be invoked from core0.
    multicore_lockout_victim_init();

    // Run the Python thread's entry function, if one was set.
    if (core1_entry) {
        core1_entry(core1_arg);
    }

    // Mark core1 as free so another thread can be created.
    core1_entry = NULL;

    // returning from here will loop the core forever (WFI)
}
mp_uint_t mp_thread_get_id(void) {
// On RP2, there are only two threads, one for each core, so the thread id
// is the core number.
return get_core_num();
}
// Create a new thread running entry(arg) on core1.
//
// stack_size is in/out: on input the requested stack size in bytes (0 means
// default), on output the usable stack size after clamping, rounding and
// reserving overflow-recovery headroom.  Returns the new thread's id (the
// core number, always 1).  Raises OSError if core1 is already in use, and
// may raise MemoryError if the stack cannot be allocated.
mp_uint_t mp_thread_create(void *(*entry)(void *), void *arg, size_t *stack_size) {
    // Check if core1 is already in use.
    if (core1_entry != NULL) {
        mp_raise_msg(&mp_type_OSError, MP_ERROR_TEXT("core1 in use"));
    }

    // Clamp the requested stack size.
    if (*stack_size == 0) {
        *stack_size = 4096; // default stack size
    } else if (*stack_size < 2048) {
        *stack_size = 2048; // minimum stack size
    }

    // Round stack size to a multiple of the word size.
    core1_stack_num_words = *stack_size / sizeof(uint32_t);
    *stack_size = core1_stack_num_words * sizeof(uint32_t);

    // Allocate stack.  Do this BEFORE committing core1_entry/core1_arg: m_new
    // raises MemoryError on failure, and if core1_entry had already been set
    // it would remain non-NULL, making every later create attempt fail with
    // "core1 in use" even though core1 was never launched.
    core1_stack = m_new(uint32_t, core1_stack_num_words);

    // Create thread on core1.
    core1_entry = entry;
    core1_arg = arg;
    multicore_reset_core1();
    multicore_launch_core1_with_stack(core1_entry_wrapper, core1_stack, *stack_size);

    // Adjust stack_size to provide room to recover from hitting the limit.
    *stack_size -= 512;

    // The thread id is the core number.
    return 1;
}
// Called when a thread begins execution; nothing to do on this port.
void mp_thread_start(void) {
}
// Called when a thread finishes execution; nothing to do on this port.
void mp_thread_finish(void) {
}
#endif // MICROPY_PY_THREAD