From cbd81d59cfea4f7b881642e804646da3a328a712 Mon Sep 17 00:00:00 2001
From: Chris Eibl <138194463+chris-eibl@users.noreply.github.com>
Date: Sun, 12 Apr 2026 07:49:18 +0200
Subject: [PATCH 1/6] GH-148047: Check early whether tail-calling is possible
for MSVC builds on Windows (#148036)
Rather than failing late when compiling e.g. a debug configuration
```
build.bat -c debug --tail-call-interp
```
with hundreds of
```
error C4737: Unable to perform required tail call. Performance may be degraded.
```
-- fail early with an explicit error message for configurations that are not supported by MSVC.
This is a follow-up on https://github.com/python/cpython/issues/140513 / https://github.com/python/cpython/pull/140548
---
.../Build/2026-04-03-20-09-46.gh-issue-148047.HE6iGK.rst | 2 ++
PCbuild/pythoncore.vcxproj | 9 +++++++++
2 files changed, 11 insertions(+)
create mode 100644 Misc/NEWS.d/next/Build/2026-04-03-20-09-46.gh-issue-148047.HE6iGK.rst
diff --git a/Misc/NEWS.d/next/Build/2026-04-03-20-09-46.gh-issue-148047.HE6iGK.rst b/Misc/NEWS.d/next/Build/2026-04-03-20-09-46.gh-issue-148047.HE6iGK.rst
new file mode 100644
index 00000000000000..e43a2695a1316e
--- /dev/null
+++ b/Misc/NEWS.d/next/Build/2026-04-03-20-09-46.gh-issue-148047.HE6iGK.rst
@@ -0,0 +1,2 @@
+Fail fast with an explicit and clear error message if tail-calling is not
+possible for MSVC builds on Windows. Patch by Chris Eibl.
diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj
index 61bee29c0af3d6..9356a66dfb4642 100644
--- a/PCbuild/pythoncore.vcxproj
+++ b/PCbuild/pythoncore.vcxproj
@@ -749,4 +749,13 @@
+
+
+
+
+
+
From 3a7df632c96eb6c5de12fac08d1da42df9e25334 Mon Sep 17 00:00:00 2001
From: "Gregory P. Smith" <68491+gpshead@users.noreply.github.com>
Date: Sat, 11 Apr 2026 23:06:19 -0700
Subject: [PATCH 2/6] gh-146313: Fix multiprocessing ResourceTracker deadlock
after os.fork() (GH-146316)
`ResourceTracker.__del__` (added in gh-88887 circa Python 3.12) calls
os.waitpid(pid, 0) which blocks indefinitely if a process created via os.fork()
still holds the tracker pipe's write end. The tracker never sees EOF, never
exits, and the parent hangs at interpreter shutdown.
Fix with two layers:
- **At-fork handler.** An os.register_at_fork(after_in_child=...)
handler closes the inherited pipe fd in the child unless a preserve
flag is set. popen_fork.Popen._launch() sets the flag before its
fork so mp.Process(fork) children keep the fd and reuse the parent's
tracker (preserving gh-80849). Raw os.fork() children close the fd,
letting the parent reap promptly.
- **Timeout safety-net.** _stop_locked() gains a wait_timeout
parameter. When called from `__del__`, it polls with WNOHANG using
exponential backoff for up to 1 second instead of blocking
indefinitely. The at-fork handler makes this unreachable in
well-behaved paths; it remains for abnormal shutdowns.
Co-authored-by: Itamar Oren
---
Lib/multiprocessing/popen_fork.py | 12 +-
Lib/multiprocessing/resource_tracker.py | 89 ++++++++-
Lib/test/_test_multiprocessing.py | 182 +++++++++++++++++-
...-03-22-23-42-22.gh-issue-146313.RtDeAd.rst | 4 +
4 files changed, 279 insertions(+), 8 deletions(-)
create mode 100644 Misc/NEWS.d/next/Library/2026-03-22-23-42-22.gh-issue-146313.RtDeAd.rst
diff --git a/Lib/multiprocessing/popen_fork.py b/Lib/multiprocessing/popen_fork.py
index 7affa1b985f091..a02a53b6a176da 100644
--- a/Lib/multiprocessing/popen_fork.py
+++ b/Lib/multiprocessing/popen_fork.py
@@ -67,7 +67,17 @@ def _launch(self, process_obj):
code = 1
parent_r, child_w = os.pipe()
child_r, parent_w = os.pipe()
- self.pid = os.fork()
+ # gh-146313: Tell the resource tracker's at-fork handler to keep
+ # the inherited pipe fd so this child reuses the parent's tracker
+ # (gh-80849) rather than closing it and launching its own.
+ from .resource_tracker import _fork_intent
+ _fork_intent.preserve_fd = True
+ try:
+ self.pid = os.fork()
+ finally:
+ # Reset in both parent and child so the flag does not leak
+ # into a subsequent raw os.fork() or nested Process launch.
+ _fork_intent.preserve_fd = False
if self.pid == 0:
try:
atexit._clear()
diff --git a/Lib/multiprocessing/resource_tracker.py b/Lib/multiprocessing/resource_tracker.py
index 3606d1effb495b..d3328a8c6170a6 100644
--- a/Lib/multiprocessing/resource_tracker.py
+++ b/Lib/multiprocessing/resource_tracker.py
@@ -20,6 +20,7 @@
import signal
import sys
import threading
+import time
import warnings
from collections import deque
@@ -75,6 +76,10 @@ def __init__(self):
# The reader should understand all formats.
self._use_simple_format = False
+ # Set to True by _stop_locked() if the waitpid polling loop ran to
+ # its timeout without reaping the tracker. Exposed for tests.
+ self._waitpid_timed_out = False
+
def _reentrant_call_error(self):
# gh-109629: this happens if an explicit call to the ResourceTracker
# gets interrupted by a garbage collection, invoking a finalizer (*)
@@ -87,16 +92,51 @@ def __del__(self):
# making sure child processess are cleaned before ResourceTracker
# gets destructed.
# see https://github.com/python/cpython/issues/88887
- self._stop(use_blocking_lock=False)
+ # gh-146313: use a timeout to avoid deadlocking if a forked child
+ # still holds the pipe's write end open.
+ self._stop(use_blocking_lock=False, wait_timeout=1.0)
+
+ def _after_fork_in_child(self):
+ # gh-146313: Called in the child right after os.fork().
+ #
+ # The tracker process is a child of the *parent*, not of us, so we
+ # could never waitpid() it anyway. Clearing _pid means our __del__
+ # becomes a no-op (the early return when _pid is None).
+ #
+ # Whether we keep the inherited _fd depends on who forked us:
+ #
+ # - multiprocessing.Process with the 'fork' start method sets
+ # _fork_intent.preserve_fd before forking. The child keeps the
+ # fd and reuses the parent's tracker (gh-80849). This is safe
+ # because multiprocessing's atexit handler joins all children
+ # before the parent's __del__ runs, so by then the fd copies
+ # are gone and the parent can reap the tracker promptly.
+ #
+ # - A raw os.fork() leaves the flag unset. We close the fd in the child after forking so
+ # the parent's __del__ can reap the tracker without waiting
+ # for the child to exit. If we later need a tracker, ensure_running()
+ # will launch a fresh one.
+ self._lock._at_fork_reinit()
+ self._reentrant_messages.clear()
+ self._pid = None
+ self._exitcode = None
+ if (self._fd is not None and
+ not getattr(_fork_intent, 'preserve_fd', False)):
+ fd = self._fd
+ self._fd = None
+ try:
+ os.close(fd)
+ except OSError:
+ pass
- def _stop(self, use_blocking_lock=True):
+ def _stop(self, use_blocking_lock=True, wait_timeout=None):
if use_blocking_lock:
with self._lock:
- self._stop_locked()
+ self._stop_locked(wait_timeout=wait_timeout)
else:
acquired = self._lock.acquire(blocking=False)
try:
- self._stop_locked()
+ self._stop_locked(wait_timeout=wait_timeout)
finally:
if acquired:
self._lock.release()
@@ -106,6 +146,10 @@ def _stop_locked(
close=os.close,
waitpid=os.waitpid,
waitstatus_to_exitcode=os.waitstatus_to_exitcode,
+ monotonic=time.monotonic,
+ sleep=time.sleep,
+ WNOHANG=getattr(os, 'WNOHANG', None),
+ wait_timeout=None,
):
# This shouldn't happen (it might when called by a finalizer)
# so we check for it anyway.
@@ -122,7 +166,30 @@ def _stop_locked(
self._fd = None
try:
- _, status = waitpid(self._pid, 0)
+ if wait_timeout is None:
+ _, status = waitpid(self._pid, 0)
+ else:
+ # gh-146313: A forked child may still hold the pipe's write
+ # end open, preventing the tracker from seeing EOF and
+ # exiting. Poll with WNOHANG to avoid blocking forever.
+ deadline = monotonic() + wait_timeout
+ delay = 0.001
+ while True:
+ result_pid, status = waitpid(self._pid, WNOHANG)
+ if result_pid != 0:
+ break
+ remaining = deadline - monotonic()
+ if remaining <= 0:
+ # The tracker is still running; it will be
+ # reparented to PID 1 (or the nearest subreaper)
+ # when we exit, and reaped there once all pipe
+ # holders release their fd.
+ self._pid = None
+ self._exitcode = None
+ self._waitpid_timed_out = True
+ return
+ delay = min(delay * 2, remaining, 0.1)
+ sleep(delay)
except ChildProcessError:
self._pid = None
self._exitcode = None
@@ -308,12 +375,24 @@ def _send(self, cmd, name, rtype):
self._ensure_running_and_write(msg)
+# gh-146313: Per-thread flag set by .popen_fork.Popen._launch() just before
+# os.fork(), telling _after_fork_in_child() to keep the inherited pipe fd so
+# the child can reuse this tracker (gh-80849). Unset for raw os.fork() calls,
+# where the child instead closes the fd so the parent's __del__ can reap the
+# tracker. Using threading.local() keeps multiple threads calling
+# popen_fork.Popen._launch() at once from clobbering each other's intent.
+_fork_intent = threading.local()
+
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd
+# gh-146313: See _after_fork_in_child docstring.
+if hasattr(os, 'register_at_fork'):
+ os.register_at_fork(after_in_child=_resource_tracker._after_fork_in_child)
+
def _decode_message(line):
if line.startswith(b'{'):
diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py
index 69174cff699115..580d9f2b32544e 100644
--- a/Lib/test/_test_multiprocessing.py
+++ b/Lib/test/_test_multiprocessing.py
@@ -6321,8 +6321,9 @@ def test_resource_tracker_sigkill(self):
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
- # The pid should be None in the child process, expect for the fork
- # context. It should not be a new value.
+ # The pid should be None in the child (the at-fork handler clears
+ # it for fork; spawn/forkserver children never had it set). It
+ # should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
@@ -6408,6 +6409,183 @@ def test_resource_tracker_blocked_signals(self):
# restore sigmask to what it was before executing test
signal.pthread_sigmask(signal.SIG_SETMASK, orig_sigmask)
+ @only_run_in_forkserver_testsuite("avoids redundant testing.")
+ def test_resource_tracker_fork_deadlock(self):
+ # gh-146313: ResourceTracker.__del__ used to deadlock if a forked
+ # child still held the pipe's write end open when the parent
+ # exited, because the parent would block in waitpid() waiting for
+ # the tracker to exit, but the tracker would never see EOF.
+ cmd = '''if 1:
+ import os, signal
+ from multiprocessing.resource_tracker import ensure_running
+ ensure_running()
+ if os.fork() == 0:
+ signal.pause()
+ os._exit(0)
+ # parent falls through and exits, triggering __del__
+ '''
+ proc = subprocess.Popen([sys.executable, '-c', cmd],
+ start_new_session=True)
+ try:
+ try:
+ proc.wait(timeout=support.SHORT_TIMEOUT)
+ except subprocess.TimeoutExpired:
+ self.fail(
+ "Parent process deadlocked in ResourceTracker.__del__"
+ )
+ self.assertEqual(proc.returncode, 0)
+ finally:
+ try:
+ os.killpg(proc.pid, signal.SIGKILL)
+ except ProcessLookupError:
+ pass
+ proc.wait()
+
+ @only_run_in_forkserver_testsuite("avoids redundant testing.")
+ def test_resource_tracker_mp_fork_reuse_and_prompt_reap(self):
+ # gh-146313 / gh-80849: A child started via multiprocessing.Process
+ # with the 'fork' start method should reuse the parent's resource
+ # tracker (the at-fork handler preserves the inherited pipe fd),
+ # *and* the parent should be able to reap the tracker promptly
+ # after joining the child, without hitting the waitpid timeout.
+ cmd = textwrap.dedent('''
+ import multiprocessing as mp
+ from multiprocessing.resource_tracker import _resource_tracker
+
+ def child(conn):
+ # Prove we can talk to the parent's tracker by registering
+ # and unregistering a dummy resource over the inherited fd.
+ # If the fd were closed, ensure_running would launch a new
+ # tracker and _pid would be non-None.
+ _resource_tracker.register("x", "dummy")
+ _resource_tracker.unregister("x", "dummy")
+ conn.send((_resource_tracker._fd is not None,
+ _resource_tracker._pid is None,
+ _resource_tracker._check_alive()))
+
+ if __name__ == "__main__":
+ mp.set_start_method("fork")
+ _resource_tracker.ensure_running()
+ r, w = mp.Pipe(duplex=False)
+ p = mp.Process(target=child, args=(w,))
+ p.start()
+ child_has_fd, child_pid_none, child_alive = r.recv()
+ p.join()
+ w.close(); r.close()
+
+ # Now simulate __del__: the child has exited and released
+ # its fd copy, so the tracker should see EOF and exit
+ # promptly -- no timeout.
+ _resource_tracker._stop(wait_timeout=5.0)
+ print(child_has_fd, child_pid_none, child_alive,
+ _resource_tracker._waitpid_timed_out,
+ _resource_tracker._exitcode)
+ ''')
+ rc, out, err = script_helper.assert_python_ok('-c', cmd)
+ parts = out.decode().split()
+ self.assertEqual(parts, ['True', 'True', 'True', 'False', '0'],
+ f"unexpected: {parts!r} stderr={err!r}")
+
+ @only_run_in_forkserver_testsuite("avoids redundant testing.")
+ def test_resource_tracker_raw_fork_prompt_reap(self):
+ # gh-146313: After a raw os.fork() the at-fork handler closes the
+ # child's inherited fd, so the parent can reap the tracker
+ # immediately -- even while the child is still alive -- rather
+ # than waiting out the 1s timeout.
+ cmd = textwrap.dedent('''
+ import os, signal
+ from multiprocessing.resource_tracker import _resource_tracker
+
+ _resource_tracker.ensure_running()
+ r, w = os.pipe()
+ pid = os.fork()
+ if pid == 0:
+ os.close(r)
+ # Report whether our fd was closed by the at-fork handler.
+ os.write(w, b"1" if _resource_tracker._fd is None else b"0")
+ os.close(w)
+ signal.pause() # stay alive so parent's reap is meaningful
+ os._exit(0)
+ os.close(w)
+ child_fd_closed = os.read(r, 1) == b"1"
+ os.close(r)
+
+ # Child is still alive and paused. Because it closed its fd
+ # copy, our close below is the last one and the tracker exits.
+ _resource_tracker._stop(wait_timeout=5.0)
+
+ os.kill(pid, signal.SIGKILL)
+ os.waitpid(pid, 0)
+ print(child_fd_closed,
+ _resource_tracker._waitpid_timed_out,
+ _resource_tracker._exitcode)
+ ''')
+ rc, out, err = script_helper.assert_python_ok('-c', cmd)
+ parts = out.decode().split()
+ self.assertEqual(parts, ['True', 'False', '0'],
+ f"unexpected: {parts!r} stderr={err!r}")
+
+ @only_run_in_forkserver_testsuite("avoids redundant testing.")
+ def test_resource_tracker_lock_reinit_after_fork(self):
+ # gh-146313: If a parent thread held the tracker's lock at fork
+ # time, the child would inherit the held lock and deadlock on
+ # its next ensure_running(). The at-fork handler reinits it.
+ cmd = textwrap.dedent('''
+ import os, threading
+ from multiprocessing.resource_tracker import _resource_tracker
+
+ held = threading.Event()
+ release = threading.Event()
+ def hold():
+ with _resource_tracker._lock:
+ held.set()
+ release.wait()
+ t = threading.Thread(target=hold)
+ t.start()
+ held.wait()
+
+ pid = os.fork()
+ if pid == 0:
+ ok = _resource_tracker._lock.acquire(timeout=5.0)
+ os._exit(0 if ok else 1)
+
+ release.set()
+ t.join()
+ _, status = os.waitpid(pid, 0)
+ print(os.waitstatus_to_exitcode(status))
+ ''')
+ rc, out, err = script_helper.assert_python_ok(
+ '-W', 'ignore::DeprecationWarning', '-c', cmd)
+ self.assertEqual(out.strip(), b'0',
+ f"child failed to acquire lock: stderr={err!r}")
+
+ @only_run_in_forkserver_testsuite("avoids redundant testing.")
+ def test_resource_tracker_safety_net_timeout(self):
+ # gh-146313: When an mp.Process(fork) child holds the preserved
+ # fd and the parent calls _stop() without joining (simulating
+ # abnormal shutdown), the safety-net timeout should fire rather
+ # than deadlocking.
+ cmd = textwrap.dedent('''
+ import multiprocessing as mp
+ import signal
+ from multiprocessing.resource_tracker import _resource_tracker
+
+ if __name__ == "__main__":
+ mp.set_start_method("fork")
+ _resource_tracker.ensure_running()
+ p = mp.Process(target=signal.pause)
+ p.start()
+ # Stop WITHOUT joining -- child still holds preserved fd
+ _resource_tracker._stop(wait_timeout=0.5)
+ print(_resource_tracker._waitpid_timed_out)
+ p.terminate()
+ p.join()
+ ''')
+ rc, out, err = script_helper.assert_python_ok('-c', cmd)
+ self.assertEqual(out.strip(), b'True',
+ f"safety-net timeout did not fire: stderr={err!r}")
+
+
class TestSimpleQueue(unittest.TestCase):
@classmethod
diff --git a/Misc/NEWS.d/next/Library/2026-03-22-23-42-22.gh-issue-146313.RtDeAd.rst b/Misc/NEWS.d/next/Library/2026-03-22-23-42-22.gh-issue-146313.RtDeAd.rst
new file mode 100644
index 00000000000000..1beea3694c422e
--- /dev/null
+++ b/Misc/NEWS.d/next/Library/2026-03-22-23-42-22.gh-issue-146313.RtDeAd.rst
@@ -0,0 +1,4 @@
+Fix a deadlock in :mod:`multiprocessing`'s resource tracker
+where the parent process could hang indefinitely in :func:`os.waitpid`
+during interpreter shutdown if a child created via :func:`os.fork` still
+held the resource tracker's pipe open.
From 30c698a655bafdb53db0d39426a2b5cfce3b19ca Mon Sep 17 00:00:00 2001
From: Neko Asakura
Date: Sun, 12 Apr 2026 08:54:03 -0400
Subject: [PATCH 3/6] gh-148398: add type watcher in `_CHECK_ATTR_CLASS`
(GH-148399)
---
.../2026-04-11-15-12-53.gh-issue-148398.g62jCA.rst | 1 +
Python/optimizer_bytecodes.c | 4 ++++
Python/optimizer_cases.c.h | 4 ++++
3 files changed, 9 insertions(+)
create mode 100644 Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-15-12-53.gh-issue-148398.g62jCA.rst
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-15-12-53.gh-issue-148398.g62jCA.rst b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-15-12-53.gh-issue-148398.g62jCA.rst
new file mode 100644
index 00000000000000..0a7ba0b27d9708
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-15-12-53.gh-issue-148398.g62jCA.rst
@@ -0,0 +1 @@
+Fix a bug in the JIT optimizer where class attribute loads were not invalidated after type mutation.
diff --git a/Python/optimizer_bytecodes.c b/Python/optimizer_bytecodes.c
index c12a4f4131bc7e..39dc4877af8884 100644
--- a/Python/optimizer_bytecodes.c
+++ b/Python/optimizer_bytecodes.c
@@ -226,6 +226,10 @@ dummy_func(void) {
}
else {
sym_set_const(owner, type);
+ if ((((PyTypeObject *)type)->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
+ PyType_Watch(TYPE_WATCHER_ID, type);
+ _Py_BloomFilter_Add(dependencies, type);
+ }
}
}
}
diff --git a/Python/optimizer_cases.c.h b/Python/optimizer_cases.c.h
index 2db2c87cb3610b..746653906874b5 100644
--- a/Python/optimizer_cases.c.h
+++ b/Python/optimizer_cases.c.h
@@ -2571,6 +2571,10 @@
}
else {
sym_set_const(owner, type);
+ if ((((PyTypeObject *)type)->tp_flags & Py_TPFLAGS_IMMUTABLETYPE) == 0) {
+ PyType_Watch(TYPE_WATCHER_ID, type);
+ _Py_BloomFilter_Add(dependencies, type);
+ }
}
}
break;
From 03d2f035d6dcfeee0c39b4b8a3ddb4dda01cddb6 Mon Sep 17 00:00:00 2001
From: Wulian233 <1055917385@qq.com>
Date: Sun, 12 Apr 2026 21:07:19 +0800
Subject: [PATCH 4/6] gh-131798: Add `_CHECK_IS_NOT_PY_CALLABLE` to the JIT
optimizer (GH-148434)
---
Lib/test/test_capi/test_opt.py | 15 +++++++++++++++
Python/optimizer_bytecodes.c | 7 +++++++
Python/optimizer_cases.c.h | 6 ++++++
3 files changed, 28 insertions(+)
diff --git a/Lib/test/test_capi/test_opt.py b/Lib/test/test_capi/test_opt.py
index f11413cc625422..2678a620763b4b 100644
--- a/Lib/test/test_capi/test_opt.py
+++ b/Lib/test/test_capi/test_opt.py
@@ -2755,6 +2755,21 @@ def testfunc(n):
self.assertNotIn("_GUARD_TOS_INT", uops)
self.assertIn("_POP_TOP_NOP", uops)
+ def test_check_is_not_py_callable(self):
+ def testfunc(n):
+ total = 0
+ f = len
+ xs = (1, 2, 3)
+ for _ in range(n):
+ total += f(xs)
+ return total
+
+ res, ex = self._run_with_optimizer(testfunc, TIER2_THRESHOLD)
+ self.assertEqual(res, 3 * TIER2_THRESHOLD)
+ self.assertIsNotNone(ex)
+ uops = get_opnames(ex)
+ self.assertNotIn("_CHECK_IS_NOT_PY_CALLABLE", uops)
+
def test_call_len_string(self):
def testfunc(n):
for _ in range(n):
diff --git a/Python/optimizer_bytecodes.c b/Python/optimizer_bytecodes.c
index 39dc4877af8884..6e5af4793419f2 100644
--- a/Python/optimizer_bytecodes.c
+++ b/Python/optimizer_bytecodes.c
@@ -1217,6 +1217,13 @@ dummy_func(void) {
(void)framesize;
}
+ op(_CHECK_IS_NOT_PY_CALLABLE, (callable, unused, unused[oparg] -- callable, unused, unused[oparg])) {
+ PyTypeObject *type = sym_get_type(callable);
+ if (type && type != &PyFunction_Type && type != &PyMethod_Type) {
+ ADD_OP(_NOP, 0, 0);
+ }
+ }
+
op(_PUSH_FRAME, (new_frame -- )) {
SYNC_SP();
if (!CURRENT_FRAME_IS_INIT_SHIM()) {
diff --git a/Python/optimizer_cases.c.h b/Python/optimizer_cases.c.h
index 746653906874b5..d90ad285a11f33 100644
--- a/Python/optimizer_cases.c.h
+++ b/Python/optimizer_cases.c.h
@@ -3651,6 +3651,12 @@
}
case _CHECK_IS_NOT_PY_CALLABLE: {
+ JitOptRef callable;
+ callable = stack_pointer[-2 - oparg];
+ PyTypeObject *type = sym_get_type(callable);
+ if (type && type != &PyFunction_Type && type != &PyMethod_Type) {
+ ADD_OP(_NOP, 0, 0);
+ }
break;
}
From 3ab94d684286b49144bf2e43cc1041f3e4c0cda8 Mon Sep 17 00:00:00 2001
From: Sam Gross
Date: Sun, 12 Apr 2026 10:40:41 -0400
Subject: [PATCH 5/6] gh-148393: Use atomic ops on _ma_watcher_tag in free
threading build (gh-148397)
Fixes data races between dict mutation and watch/unwatch on the same dict.
---
Include/internal/pycore_dict.h | 4 ++--
.../internal/pycore_pyatomic_ft_wrappers.h | 12 ++++++++++
Lib/test/test_free_threading/test_dict.py | 23 +++++++++++++++++++
...-04-11-17-28-52.gh-issue-148393.lX6gwN.rst | 2 ++
Objects/dictobject.c | 4 ++--
Python/optimizer_analysis.c | 5 ++--
6 files changed, 44 insertions(+), 6 deletions(-)
create mode 100644 Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-17-28-52.gh-issue-148393.lX6gwN.rst
diff --git a/Include/internal/pycore_dict.h b/Include/internal/pycore_dict.h
index d58539fa846563..5bbea187394db6 100644
--- a/Include/internal/pycore_dict.h
+++ b/Include/internal/pycore_dict.h
@@ -292,7 +292,7 @@ _PyDict_NotifyEvent(PyDict_WatchEvent event,
PyObject *value)
{
assert(Py_REFCNT((PyObject*)mp) > 0);
- int watcher_bits = mp->_ma_watcher_tag & DICT_WATCHER_MASK;
+ int watcher_bits = FT_ATOMIC_LOAD_UINT64_RELAXED(mp->_ma_watcher_tag) & DICT_WATCHER_MASK;
if (watcher_bits) {
RARE_EVENT_STAT_INC(watched_dict_modification);
_PyDict_SendEvent(watcher_bits, event, mp, key, value);
@@ -368,7 +368,7 @@ PyDictObject *_PyObject_MaterializeManagedDict_LockHeld(PyObject *);
static inline Py_ssize_t
_PyDict_UniqueId(PyDictObject *mp)
{
- return (Py_ssize_t)(mp->_ma_watcher_tag >> DICT_UNIQUE_ID_SHIFT);
+ return (Py_ssize_t)(FT_ATOMIC_LOAD_UINT64_RELAXED(mp->_ma_watcher_tag) >> DICT_UNIQUE_ID_SHIFT);
}
static inline void
diff --git a/Include/internal/pycore_pyatomic_ft_wrappers.h b/Include/internal/pycore_pyatomic_ft_wrappers.h
index c0f859a23e10b8..3155481bb5c36b 100644
--- a/Include/internal/pycore_pyatomic_ft_wrappers.h
+++ b/Include/internal/pycore_pyatomic_ft_wrappers.h
@@ -49,6 +49,8 @@ extern "C" {
_Py_atomic_load_uint16_relaxed(&value)
#define FT_ATOMIC_LOAD_UINT32_RELAXED(value) \
_Py_atomic_load_uint32_relaxed(&value)
+#define FT_ATOMIC_LOAD_UINT64_RELAXED(value) \
+ _Py_atomic_load_uint64_relaxed(&value)
#define FT_ATOMIC_LOAD_ULONG_RELAXED(value) \
_Py_atomic_load_ulong_relaxed(&value)
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) \
@@ -71,6 +73,12 @@ extern "C" {
_Py_atomic_store_uint16_relaxed(&value, new_value)
#define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) \
_Py_atomic_store_uint32_relaxed(&value, new_value)
+#define FT_ATOMIC_AND_UINT64(value, new_value) \
+ (void)_Py_atomic_and_uint64(&value, new_value)
+#define FT_ATOMIC_OR_UINT64(value, new_value) \
+ (void)_Py_atomic_or_uint64(&value, new_value)
+#define FT_ATOMIC_ADD_UINT64(value, new_value) \
+ (void)_Py_atomic_add_uint64(&value, new_value)
#define FT_ATOMIC_STORE_CHAR_RELAXED(value, new_value) \
_Py_atomic_store_char_relaxed(&value, new_value)
#define FT_ATOMIC_LOAD_CHAR_RELAXED(value) \
@@ -146,6 +154,7 @@ extern "C" {
#define FT_ATOMIC_LOAD_UINT8_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT16_RELAXED(value) value
#define FT_ATOMIC_LOAD_UINT32_RELAXED(value) value
+#define FT_ATOMIC_LOAD_UINT64_RELAXED(value) value
#define FT_ATOMIC_LOAD_ULONG_RELAXED(value) value
#define FT_ATOMIC_STORE_PTR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_PTR_RELEASE(value, new_value) value = new_value
@@ -157,6 +166,9 @@ extern "C" {
#define FT_ATOMIC_STORE_UINT8_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT16_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_STORE_UINT32_RELAXED(value, new_value) value = new_value
+#define FT_ATOMIC_AND_UINT64(value, new_value) (void)(value &= new_value)
+#define FT_ATOMIC_OR_UINT64(value, new_value) (void)(value |= new_value)
+#define FT_ATOMIC_ADD_UINT64(value, new_value) (void)(value += new_value)
#define FT_ATOMIC_LOAD_CHAR_RELAXED(value) value
#define FT_ATOMIC_STORE_CHAR_RELAXED(value, new_value) value = new_value
#define FT_ATOMIC_LOAD_UCHAR_RELAXED(value) value
diff --git a/Lib/test/test_free_threading/test_dict.py b/Lib/test/test_free_threading/test_dict.py
index 1ffd924e9f477a..55272a00c3ad50 100644
--- a/Lib/test/test_free_threading/test_dict.py
+++ b/Lib/test/test_free_threading/test_dict.py
@@ -245,6 +245,29 @@ def reader():
with threading_helper.start_threads([t1, t2]):
pass
+ @unittest.skipIf(_testcapi is None, "requires _testcapi")
+ def test_racing_watch_unwatch_dict(self):
+ # gh-148393: race between PyDict_Watch / PyDict_Unwatch
+ # and concurrent dict mutation reading _ma_watcher_tag.
+ wid = _testcapi.add_dict_watcher(0)
+ try:
+ d = {}
+ ITERS = 1000
+
+ def writer():
+ for i in range(ITERS):
+ d[i] = i
+ del d[i]
+
+ def watcher():
+ for _ in range(ITERS):
+ _testcapi.watch_dict(wid, d)
+ _testcapi.unwatch_dict(wid, d)
+
+ threading_helper.run_concurrently([writer, watcher])
+ finally:
+ _testcapi.clear_dict_watcher(wid)
+
def test_racing_dict_update_and_method_lookup(self):
# gh-144295: test race between dict modifications and method lookups.
# Uses BytesIO because the race requires a type without Py_TPFLAGS_INLINE_VALUES
diff --git a/Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-17-28-52.gh-issue-148393.lX6gwN.rst b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-17-28-52.gh-issue-148393.lX6gwN.rst
new file mode 100644
index 00000000000000..33c4b75bfb944c
--- /dev/null
+++ b/Misc/NEWS.d/next/Core_and_Builtins/2026-04-11-17-28-52.gh-issue-148393.lX6gwN.rst
@@ -0,0 +1,2 @@
+Fix data races between :c:func:`PyDict_Watch` / :c:func:`PyDict_Unwatch`
+and concurrent dict mutation in the :term:`free-threaded build`.
diff --git a/Objects/dictobject.c b/Objects/dictobject.c
index 67bc4319e0bae2..b5300eb410c69c 100644
--- a/Objects/dictobject.c
+++ b/Objects/dictobject.c
@@ -8028,7 +8028,7 @@ PyDict_Watch(int watcher_id, PyObject* dict)
if (validate_watcher_id(interp, watcher_id)) {
return -1;
}
- ((PyDictObject*)dict)->_ma_watcher_tag |= (1LL << watcher_id);
+ FT_ATOMIC_OR_UINT64(((PyDictObject*)dict)->_ma_watcher_tag, (1LL << watcher_id));
return 0;
}
@@ -8043,7 +8043,7 @@ PyDict_Unwatch(int watcher_id, PyObject* dict)
if (validate_watcher_id(interp, watcher_id)) {
return -1;
}
- ((PyDictObject*)dict)->_ma_watcher_tag &= ~(1LL << watcher_id);
+ FT_ATOMIC_AND_UINT64(((PyDictObject*)dict)->_ma_watcher_tag, ~(1LL << watcher_id));
return 0;
}
diff --git a/Python/optimizer_analysis.c b/Python/optimizer_analysis.c
index ca9bcc8a40c35e..6742488a0d06c2 100644
--- a/Python/optimizer_analysis.c
+++ b/Python/optimizer_analysis.c
@@ -119,14 +119,15 @@ static int
get_mutations(PyObject* dict) {
assert(PyDict_CheckExact(dict));
PyDictObject *d = (PyDictObject *)dict;
- return (d->_ma_watcher_tag >> DICT_MAX_WATCHERS) & ((1 << DICT_WATCHED_MUTATION_BITS)-1);
+ uint64_t tag = FT_ATOMIC_LOAD_UINT64_RELAXED(d->_ma_watcher_tag);
+ return (tag >> DICT_MAX_WATCHERS) & ((1 << DICT_WATCHED_MUTATION_BITS) - 1);
}
static void
increment_mutations(PyObject* dict) {
assert(PyDict_CheckExact(dict));
PyDictObject *d = (PyDictObject *)dict;
- d->_ma_watcher_tag += (1 << DICT_MAX_WATCHERS);
+ FT_ATOMIC_ADD_UINT64(d->_ma_watcher_tag, (1 << DICT_MAX_WATCHERS));
}
/* The first two dict watcher IDs are reserved for CPython,
From 1e79bf6c05d057c033c396e58d20618d80d392f2 Mon Sep 17 00:00:00 2001
From: Locked-chess-official <13140752715@163.com>
Date: Mon, 13 Apr 2026 01:06:41 +0800
Subject: [PATCH 6/6] gh-139551: add support for BaseExceptionGroup in IDLE
(GH-139563)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Meaningfully render ExceptionGroup tracebacks in the IDLE GUI REPL.
---------
Co-authored-by: Bénédikt Tran <10796600+picnixz@users.noreply.github.com>
Co-authored-by: Gregory P. Smith
---
Lib/idlelib/idle_test/test_run.py | 93 ++++++++++++++++++
Lib/idlelib/run.py | 97 +++++++++++++++----
...-10-05-19-33-39.gh-issue-139551.TX9BRc.rst | 1 +
3 files changed, 174 insertions(+), 17 deletions(-)
create mode 100644 Misc/NEWS.d/next/IDLE/2025-10-05-19-33-39.gh-issue-139551.TX9BRc.rst
diff --git a/Lib/idlelib/idle_test/test_run.py b/Lib/idlelib/idle_test/test_run.py
index 9a9d3b7b4e219c..57bf5559c0fa88 100644
--- a/Lib/idlelib/idle_test/test_run.py
+++ b/Lib/idlelib/idle_test/test_run.py
@@ -82,6 +82,99 @@ def test_get_multiple_message(self, mock):
subtests += 1
self.assertEqual(subtests, len(data2)) # All subtests ran?
+ def _capture_exception(self):
+ """Call run.print_exception() and return its stderr output."""
+ with captured_stderr() as output:
+ with mock.patch.object(run, 'cleanup_traceback') as ct:
+ ct.side_effect = lambda t, e: t
+ run.print_exception()
+ return output.getvalue()
+
+ @force_not_colorized
+ def test_print_exception_group_nested(self):
+ try:
+ try:
+ raise ExceptionGroup('inner', [ValueError('v1')])
+ except ExceptionGroup as inner:
+ raise ExceptionGroup('outer', [inner, TypeError('t1')])
+ except ExceptionGroup:
+ tb = self._capture_exception()
+
+ self.assertIn('ExceptionGroup: outer (2 sub-exceptions)', tb)
+ self.assertIn('ExceptionGroup: inner', tb)
+ self.assertIn('ValueError: v1', tb)
+ self.assertIn('TypeError: t1', tb)
+ # Verify tree structure characters.
+ self.assertIn('+-+---------------- 1 ----------------', tb)
+ self.assertIn('+---------------- 2 ----------------', tb)
+ self.assertIn('+------------------------------------', tb)
+
+ @force_not_colorized
+ def test_print_exception_group_chaining(self):
+ # __cause__ on a sub-exception exercises the prefixed
+ # chaining-message path (margin chars on separator lines).
+ sub = TypeError('t1')
+ sub.__cause__ = ValueError('original')
+ try:
+ raise ExceptionGroup('eg1', [sub])
+ except ExceptionGroup:
+ tb = self._capture_exception()
+ self.assertIn('ValueError: original', tb)
+ self.assertIn('| The above exception was the direct cause', tb)
+ self.assertIn('ExceptionGroup: eg1', tb)
+
+ # __context__ (implicit chaining) on a sub-exception.
+ sub = TypeError('t2')
+ sub.__context__ = ValueError('first')
+ try:
+ raise ExceptionGroup('eg2', [sub])
+ except ExceptionGroup:
+ tb = self._capture_exception()
+ self.assertIn('ValueError: first', tb)
+ self.assertIn('| During handling of the above exception', tb)
+ self.assertIn('ExceptionGroup: eg2', tb)
+
+ @force_not_colorized
+ def test_print_exception_group_seen(self):
+ shared = ValueError('shared')
+ try:
+ raise ExceptionGroup('eg', [shared, shared])
+ except ExceptionGroup:
+ tb = self._capture_exception()
+
+ self.assertIn('ValueError: shared', tb)
+ self.assertIn('', tb)
+
+ @force_not_colorized
+ def test_print_exception_group_max_width(self):
+ excs = [ValueError(f'v{i}') for i in range(20)]
+ try:
+ raise ExceptionGroup('eg', excs)
+ except ExceptionGroup:
+ tb = self._capture_exception()
+
+ self.assertIn('+---------------- 15 ----------------', tb)
+ self.assertIn('+---------------- ... ----------------', tb)
+ self.assertIn('and 5 more exceptions', tb)
+ self.assertNotIn('+---------------- 16 ----------------', tb)
+
+ @force_not_colorized
+ def test_print_exception_group_max_depth(self):
+ def make_nested(depth):
+ if depth == 0:
+ return ValueError('leaf')
+ return ExceptionGroup(f'level{depth}',
+ [make_nested(depth - 1)])
+
+ try:
+ raise make_nested(15)
+ except ExceptionGroup:
+ tb = self._capture_exception()
+
+ self.assertIn('... (max_group_depth is 10)', tb)
+ self.assertIn('ExceptionGroup: level15', tb)
+ self.assertNotIn('ValueError: leaf', tb)
+
# StdioFile tests.
class S(str):
diff --git a/Lib/idlelib/run.py b/Lib/idlelib/run.py
index a30db99a619a93..e1c40fee8f4805 100644
--- a/Lib/idlelib/run.py
+++ b/Lib/idlelib/run.py
@@ -249,31 +249,94 @@ def print_exception():
sys.last_type, sys.last_value, sys.last_traceback = excinfo
sys.last_exc = val
seen = set()
+ exclude = ("run.py", "rpc.py", "threading.py", "queue.py",
+ "debugger_r.py", "bdb.py")
+ max_group_width = 15
+ max_group_depth = 10
+ group_depth = 0
+
+ def print_exc_group(typ, exc, tb, prefix=""):
+ nonlocal group_depth
+ group_depth += 1
+ prefix2 = prefix or " "
+ if group_depth > max_group_depth:
+ print(f"{prefix2}| ... (max_group_depth is {max_group_depth})",
+ file=efile)
+ group_depth -= 1
+ return
+ if tb:
+ if not prefix:
+ print(" + Exception Group Traceback (most recent call last):", file=efile)
+ else:
+ print(f"{prefix}| Exception Group Traceback (most recent call last):", file=efile)
+ tbe = traceback.extract_tb(tb)
+ cleanup_traceback(tbe, exclude)
+ for line in traceback.format_list(tbe):
+ for subline in line.rstrip().splitlines():
+ print(f"{prefix2}| {subline}", file=efile)
+ lines = get_message_lines(typ, exc, tb)
+ for line in lines:
+ print(f"{prefix2}| {line}", end="", file=efile)
+ num_excs = len(exc.exceptions)
+ if num_excs <= max_group_width:
+ n = num_excs
+ else:
+ n = max_group_width + 1
+ for i, sub in enumerate(exc.exceptions[:n], 1):
+ truncated = (i > max_group_width)
+ first_line_pre = "+-" if i == 1 else " "
+ title = str(i) if not truncated else '...'
+ print(f"{prefix2}{first_line_pre}+---------------- {title} ----------------", file=efile)
+ if truncated:
+ remaining = num_excs - max_group_width
+ plural = 's' if remaining > 1 else ''
+ print(f"{prefix2} | and {remaining} more exception{plural}",
+ file=efile)
+ need_print_underline = True
+ elif id(sub) not in seen:
+ if not prefix:
+ print_exc(type(sub), sub, sub.__traceback__, " ")
+ else:
+ print_exc(type(sub), sub, sub.__traceback__, prefix + " ")
+ need_print_underline = not isinstance(sub, BaseExceptionGroup)
+ else:
+ print(f"{prefix2} | ", file=efile)
+ need_print_underline = True
+ if need_print_underline and i == n:
+ print(f"{prefix2} +------------------------------------", file=efile)
+ group_depth -= 1
- def print_exc(typ, exc, tb):
+ def print_exc(typ, exc, tb, prefix=""):
seen.add(id(exc))
context = exc.__context__
cause = exc.__cause__
+ prefix2 = f"{prefix}| " if prefix else ""
if cause is not None and id(cause) not in seen:
- print_exc(type(cause), cause, cause.__traceback__)
- print("\nThe above exception was the direct cause "
- "of the following exception:\n", file=efile)
+ print_exc(type(cause), cause, cause.__traceback__, prefix)
+ print(f"{prefix2}\n{prefix2}The above exception was the direct cause "
+ f"of the following exception:\n{prefix2}", file=efile)
elif (context is not None and
not exc.__suppress_context__ and
id(context) not in seen):
- print_exc(type(context), context, context.__traceback__)
- print("\nDuring handling of the above exception, "
- "another exception occurred:\n", file=efile)
- if tb:
- tbe = traceback.extract_tb(tb)
- print('Traceback (most recent call last):', file=efile)
- exclude = ("run.py", "rpc.py", "threading.py", "queue.py",
- "debugger_r.py", "bdb.py")
- cleanup_traceback(tbe, exclude)
- traceback.print_list(tbe, file=efile)
- lines = get_message_lines(typ, exc, tb)
- for line in lines:
- print(line, end='', file=efile)
+ print_exc(type(context), context, context.__traceback__, prefix)
+ print(f"{prefix2}\n{prefix2}During handling of the above exception, "
+ f"another exception occurred:\n{prefix2}", file=efile)
+ if isinstance(exc, BaseExceptionGroup):
+ print_exc_group(typ, exc, tb, prefix=prefix)
+ else:
+ if tb:
+ print(f"{prefix2}Traceback (most recent call last):", file=efile)
+ tbe = traceback.extract_tb(tb)
+ cleanup_traceback(tbe, exclude)
+ if prefix:
+ for line in traceback.format_list(tbe):
+ for subline in line.rstrip().splitlines():
+ print(f"{prefix}| {subline}", file=efile)
+ else:
+ traceback.print_list(tbe, file=efile)
+ lines = get_message_lines(typ, exc, tb)
+ for line in lines:
+ print(f"{prefix2}{line}", end="", file=efile)
print_exc(typ, val, tb)
diff --git a/Misc/NEWS.d/next/IDLE/2025-10-05-19-33-39.gh-issue-139551.TX9BRc.rst b/Misc/NEWS.d/next/IDLE/2025-10-05-19-33-39.gh-issue-139551.TX9BRc.rst
new file mode 100644
index 00000000000000..5ea1dfc9b5100d
--- /dev/null
+++ b/Misc/NEWS.d/next/IDLE/2025-10-05-19-33-39.gh-issue-139551.TX9BRc.rst
@@ -0,0 +1 @@
+Support rendering :exc:`BaseExceptionGroup` in IDLE.