Rename Py_ to Ty_ throughout C API

Massive automated renaming of all Py_/PyObject/etc. prefixes to Ty_/TyObject/etc.
This includes:
- All public API types (TyObject, TyTypeObject, etc.)
- All public API functions (Ty_Initialize, Ty_BuildValue, etc.)
- All internal API (_Ty_ prefixes)
- Reference counting macros (Ty_INCREF, Ty_DECREF, etc.)
- Type flags (Ty_TPFLAGS_*)
- Debug flags (Ty_DEBUG, Ty_TRACE_REFS, etc.)
- All object type APIs (TyList_, TyDict_, TyUnicode_, etc.)

This changes over 60,000 occurrences across 1000+ files.
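
To illustrate what the rename means for embedders, here is a minimal sketch of post-rename embedding code. It assumes only that the Ty_* entry points keep the semantics and signatures of their Py_* counterparts; the <Tython.h> header name and Ty_Finalize are hypothetical stand-ins, since the commit message does not name the renamed header or list every function.

    #include <Tython.h>   /* hypothetical; stands in for the renamed <Python.h> */

    int main(void)
    {
        Ty_Initialize();                  /* was Py_Initialize() */

        /* Build the tuple (42, "spam"); mirrors Py_BuildValue(). */
        TyObject *tuple = Ty_BuildValue("(is)", 42, "spam");
        if (tuple != NULL) {
            Ty_DECREF(tuple);             /* was Py_DECREF() */
        }

        Ty_Finalize();                    /* assumed rename of Py_Finalize() */
        return 0;
    }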

Co-authored-by: johndoe6345789 <224850594+johndoe6345789@users.noreply.github.com>
copilot-swe-agent[bot] committed 2025-12-29 17:37:49 +00:00
parent d812cb400e
commit b198f511d2
1034 changed files with 157370 additions and 157370 deletions

View File

@@ -16,7 +16,7 @@ terms of the MIT license. A copy of the license can be found in the file
 #include <string.h> // memset, strlen (for mi_strdup)
 #include <stdlib.h> // malloc, abort
-#define _ZSt15get_new_handlerv _Py__ZSt15get_new_handlerv
+#define _ZSt15get_new_handlerv _Ty__ZSt15get_new_handlerv
 #define MI_IN_ALLOC_C
 #include "alloc-override.c"
@@ -609,7 +609,7 @@ bool _mi_free_delayed_block(mi_block_t* block) {
   // get segment and page
   const mi_segment_t* const segment = _mi_ptr_segment(block);
   mi_assert_internal(_mi_ptr_cookie(segment) == segment->cookie);
-#ifndef Py_GIL_DISABLED
+#ifndef Ty_GIL_DISABLED
   // The GC traverses heaps of other threads, which can trigger this assert.
   mi_assert_internal(_mi_thread_id() == segment->thread_id);
 #endif

View File

@@ -98,7 +98,7 @@ static bool mi_heap_page_collect(mi_heap_t* heap, mi_page_queue_t* pq, mi_page_t
   if (mi_page_all_free(page)) {
     // no more used blocks, free the page.
     // note: this will free retired pages as well.
-    bool freed = _PyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
+    bool freed = _TyMem_mi_page_maybe_free(page, pq, collect >= MI_FORCE);
     if (!freed && collect == MI_ABANDON) {
       _mi_page_abandon(page, pq);
     }
@@ -157,7 +157,7 @@ static void mi_heap_collect_ex(mi_heap_t* heap, mi_collect_t collect)
   _mi_heap_collect_retired(heap, force);
   // free pages that were delayed with QSBR
-  _PyMem_mi_heap_collect_qsbr(heap);
+  _TyMem_mi_heap_collect_qsbr(heap);
   // collect all pages owned by this thread
   mi_heap_visit_pages(heap, &mi_heap_page_collect, &collect, NULL);

View File

@@ -226,7 +226,7 @@ void _mi_page_free_collect(mi_page_t* page, bool force) {
   // and the local free list
   if (page->local_free != NULL) {
     // any previous QSBR goals are no longer valid because we reused the page
-    _PyMem_mi_page_clear_qsbr(page);
+    _TyMem_mi_page_clear_qsbr(page);
     if mi_likely(page->free == NULL) {
       // usual case
@@ -270,7 +270,7 @@ void _mi_page_reclaim(mi_heap_t* heap, mi_page_t* page) {
   // TODO: push on full queue immediately if it is full?
   mi_page_queue_t* pq = mi_page_queue(heap, mi_page_block_size(page));
   mi_page_queue_push(heap, pq, page);
-  _PyMem_mi_page_reclaimed(page);
+  _TyMem_mi_page_reclaimed(page);
   mi_assert_expensive(_mi_page_is_valid(page));
 }
@@ -387,7 +387,7 @@ void _mi_page_abandon(mi_page_t* page, mi_page_queue_t* pq) {
   mi_heap_t* pheap = mi_page_heap(page);
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
   if (page->qsbr_node.next != NULL) {
     // remove from QSBR queue, but keep the goal
     llist_remove(&page->qsbr_node);
@@ -428,7 +428,7 @@ void _mi_page_free(mi_page_t* page, mi_page_queue_t* pq, bool force) {
   mi_heap_t* heap = mi_page_heap(page);
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
   mi_assert_internal(page->qsbr_goal == 0);
   mi_assert_internal(page->qsbr_node.next == NULL);
 #endif
@@ -461,7 +461,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
   mi_page_set_has_aligned(page, false);
   // any previous QSBR goals are no longer valid because we reused the page
-  _PyMem_mi_page_clear_qsbr(page);
+  _TyMem_mi_page_clear_qsbr(page);
   // don't retire too often..
   // (or we end up retiring and re-allocating most of the time)
@@ -484,7 +484,7 @@ void _mi_page_retire(mi_page_t* page) mi_attr_noexcept {
       return; // don't free after all
     }
   }
-  _PyMem_mi_page_maybe_free(page, pq, false);
+  _TyMem_mi_page_maybe_free(page, pq, false);
 }
 // free retired pages: we don't need to look at the entire queues
@@ -499,10 +499,10 @@ void _mi_heap_collect_retired(mi_heap_t* heap, bool force) {
     if (mi_page_all_free(page)) {
       page->retire_expire--;
       if (force || page->retire_expire == 0) {
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
         mi_assert_internal(page->qsbr_goal == 0);
 #endif
-        _PyMem_mi_page_maybe_free(page, pq, force);
+        _TyMem_mi_page_maybe_free(page, pq, force);
       }
       else {
         // keep retired, update min/max
@@ -714,7 +714,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
   mi_assert_internal(page->xthread_free == 0);
   mi_assert_internal(page->next == NULL);
   mi_assert_internal(page->prev == NULL);
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
   mi_assert_internal(page->qsbr_goal == 0);
   mi_assert_internal(page->qsbr_node.next == NULL);
 #endif
@@ -777,7 +777,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   mi_heap_stat_counter_increase(heap, searches, count);
   if (page == NULL) {
-    _PyMem_mi_heap_collect_qsbr(heap); // some pages might be safe to free now
+    _TyMem_mi_heap_collect_qsbr(heap); // some pages might be safe to free now
     _mi_heap_collect_retired(heap, false); // perhaps make a page available?
     page = mi_page_fresh(heap, pq);
     if (page == NULL && first_try) {
@@ -788,7 +788,7 @@ static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* p
   else {
     mi_assert(pq->first == page);
     page->retire_expire = 0;
-    _PyMem_mi_page_clear_qsbr(page);
+    _TyMem_mi_page_clear_qsbr(page);
   }
   mi_assert_internal(page == NULL || mi_page_immediate_available(page));
   return page;
@@ -814,7 +814,7 @@ static inline mi_page_t* mi_find_free_page(mi_heap_t* heap, size_t size) {
     if (mi_page_immediate_available(page)) {
       page->retire_expire = 0;
-      _PyMem_mi_page_clear_qsbr(page);
+      _TyMem_mi_page_clear_qsbr(page);
       return page; // fast path
     }
   }
@@ -908,7 +908,7 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
     return NULL;
   }
   else {
-    _PyMem_mi_heap_collect_qsbr(heap);
+    _TyMem_mi_heap_collect_qsbr(heap);
     return mi_large_huge_page_alloc(heap,size,huge_alignment);
   }
 }

View File

@@ -988,7 +988,7 @@ static mi_slice_t* mi_segment_page_clear(mi_page_t* page, mi_segments_tld_t* tld
   mi_assert_internal(mi_page_all_free(page));
   mi_segment_t* segment = _mi_ptr_segment(page);
   mi_assert_internal(segment->used > 0);
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
   mi_assert_internal(page->qsbr_goal == 0);
   mi_assert_internal(page->qsbr_node.next == NULL);
 #endif
@@ -1280,11 +1280,11 @@ static bool mi_segment_check_free(mi_segment_t* segment, size_t slices_needed, s
     // ensure used count is up to date and collect potential concurrent frees
     mi_page_t* const page = mi_slice_to_page(slice);
     _mi_page_free_collect(page, false);
-    if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
+    if (mi_page_all_free(page) && _TyMem_mi_page_is_safe_to_free(page)) {
       // if this page is all free now, free it without adding to any queues (yet)
       mi_assert_internal(page->next == NULL && page->prev==NULL);
       _mi_stat_decrease(&tld->stats->pages_abandoned, 1);
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
       page->qsbr_goal = 0;
 #endif
       segment->abandoned--;
@@ -1357,9 +1357,9 @@ static mi_segment_t* mi_segment_reclaim(mi_segment_t* segment, mi_heap_t* heap,
   mi_page_set_heap(page, target_heap);
   _mi_page_use_delayed_free(page, MI_USE_DELAYED_FREE, true); // override never (after heap is set)
   _mi_page_free_collect(page, false); // ensure used count is up to date
-  if (mi_page_all_free(page) && _PyMem_mi_page_is_safe_to_free(page)) {
+  if (mi_page_all_free(page) && _TyMem_mi_page_is_safe_to_free(page)) {
     // if everything free by now, free the page
-#ifdef Py_GIL_DISABLED
+#ifdef Ty_GIL_DISABLED
     page->qsbr_goal = 0;
 #endif
     slice = mi_segment_page_clear(page, tld); // set slice again due to coalesceing