@@ -112,26 +112,6 @@ mark_executable(unsigned char *memory, size_t size)
     return 0;
 }
 
-static int
-mark_readable(unsigned char *memory, size_t size)
-{
-    if (size == 0) {
-        return 0;
-    }
-    assert(size % get_page_size() == 0);
-#ifdef MS_WINDOWS
-    DWORD old;
-    int failed = !VirtualProtect(memory, size, PAGE_READONLY, &old);
-#else
-    int failed = mprotect(memory, size, PROT_READ);
-#endif
-    if (failed) {
-        jit_error("unable to protect readable memory");
-        return -1;
-    }
-    return 0;
-}
-
 // JIT compiler stuff: /////////////////////////////////////////////////////////
 
 // Warning! AArch64 requires you to get your hands dirty. These are your gloves:
@@ -409,12 +389,14 @@ _PyJIT_Compile(_PyExecutorObject *executor, const _PyUOpInstruction *trace, size
         code_size += group->code.body_size;
         data_size += group->data.body_size;
     }
-    // Round up to the nearest page (code and data need separate pages):
+    code_size += stencil_groups[_FATAL_ERROR].code.body_size;
+    data_size += stencil_groups[_FATAL_ERROR].data.body_size;
+    // Round up to the nearest page:
     size_t page_size = get_page_size();
     assert((page_size & (page_size - 1)) == 0);
-    code_size += page_size - (code_size & (page_size - 1));
-    data_size += page_size - (data_size & (page_size - 1));
-    unsigned char *memory = jit_alloc(code_size + data_size);
+    size_t padding = page_size - ((code_size + data_size) & (page_size - 1));
+    size_t total_size = code_size + data_size + padding;
+    unsigned char *memory = jit_alloc(total_size);
     if (memory == NULL) {
         return -1;
     }
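
The padding expression in the hunk above is easy to misread, so here is a minimal standalone sketch of the same arithmetic. The concrete page_size, code_size, and data_size values are made up for illustration; the real code queries get_page_size(). The mask trick only works because page sizes are powers of two, and note that a sum which is already page-aligned still gains one full extra page of padding:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
    size_t page_size = 4096;  // illustrative; the JIT asks the OS
    // Masking with (page_size - 1) assumes a power-of-two page size:
    assert((page_size & (page_size - 1)) == 0);
    size_t code_size = 5000;  // made-up sizes for the example
    size_t data_size = 1200;
    // Same expression as the patch: pad the *combined* size to a page boundary.
    size_t padding = page_size - ((code_size + data_size) & (page_size - 1));
    size_t total_size = code_size + data_size + padding;
    printf("total_size = %zu (a multiple of %zu)\n", total_size, page_size);
    assert(total_size % page_size == 0);
    return 0;
}
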
@@ -444,14 +426,26 @@ _PyJIT_Compile(_PyExecutorObject *executor, const _PyUOpInstruction *trace, size
         code += group->code.body_size;
         data += group->data.body_size;
     }
-    if (mark_executable(memory, code_size) ||
-        mark_readable(memory + code_size, data_size))
-    {
-        jit_free(memory, code_size + data_size);
+    // Protect against accidental buffer overrun into data:
+    const StencilGroup *group = &stencil_groups[_FATAL_ERROR];
+    uint64_t patches[] = GET_PATCHES();
+    patches[HoleValue_CODE] = (uint64_t)code;
+    patches[HoleValue_CONTINUE] = (uint64_t)code;
+    patches[HoleValue_DATA] = (uint64_t)data;
+    patches[HoleValue_EXECUTOR] = (uint64_t)executor;
+    patches[HoleValue_TOP] = (uint64_t)code;
+    patches[HoleValue_ZERO] = 0;
+    emit(group, patches);
+    code += group->code.body_size;
+    data += group->data.body_size;
+    assert(code == memory + code_size);
+    assert(data == memory + code_size + data_size);
+    if (mark_executable(memory, total_size)) {
+        jit_free(memory, total_size);
         return -1;
     }
     executor->jit_code = memory;
-    executor->jit_size = code_size + data_size;
+    executor->jit_size = total_size;
     return 0;
 }
 
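
With mark_readable gone, a single mark_executable call now covers code, data, and padding in one page-aligned region. The sketch below is not CPython's actual mark_executable; it is a minimal POSIX-only illustration (assuming typical Linux, where flipping an anonymous mapping from writable to read + execute is permitted) of the design choice of protecting the whole block with one call instead of splitting code and data onto separate pages:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
    size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
    // Allocate one writable, page-aligned region (like jit_alloc's result):
    unsigned char *memory = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (memory == MAP_FAILED) {
        perror("mmap");
        return 1;
    }
    memory[0] = 0xC3;  // e.g. an x86-64 "ret"; data would follow later in the region
    // One protection change covers code, data, and padding together:
    if (mprotect(memory, page_size, PROT_READ | PROT_EXEC)) {
        perror("mprotect");
        munmap(memory, page_size);
        return 1;
    }
    puts("region is now read + execute");  // nothing is actually executed here
    munmap(memory, page_size);
    return 0;
}
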