@@ -367,6 +367,18 @@ void caml_runtime_events_free_cursor(struct caml_runtime_events_cursor *cursor){
   }
 }
 
+static char *get_map_offset(struct caml_runtime_events_cursor *cursor,
+                            uint64_t offset, int domain_num, uint64_t len)
+{
+  uint64_t limit = cursor->ring_file_size_bytes;
+  if (offset >= limit)
+    return NULL;
+  offset += domain_num * len;
+  if (offset >= limit || len > limit - offset)
+    return NULL;
+  return (char *)cursor->map + offset;
+}
+
 runtime_events_error
 caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
                               void *callback_data, uintnat max_events,
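The new get_map_offset helper centralises the bounds checking the hunks below rely on: it verifies that the whole [offset, offset + len) range for a given domain lies inside the mapped ring file before deriving a pointer, and returns NULL otherwise. Note the second comparison is written as len > limit - offset rather than offset + len > limit, so an untrusted len cannot wrap around uint64_t and slip past the check. A minimal standalone sketch of that idiom (names are illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Non-zero iff [offset, offset + len) lies inside a file of `limit` bytes.
   Using `len <= limit - offset` instead of `offset + len <= limit` keeps
   the arithmetic from wrapping when `len` is attacker-controlled. */
static int range_in_bounds(uint64_t offset, uint64_t len, uint64_t limit)
{
  return offset < limit && len <= limit - offset;
}

int main(void)
{
  uint64_t limit = 4096;
  assert(range_in_bounds(0, 4096, limit));        /* ends exactly at EOF */
  assert(!range_in_bounds(4095, 2, limit));       /* runs past the end */
  assert(!range_in_bounds(8, UINT64_MAX, limit)); /* naive add would wrap */
  return 0;
}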
@@ -388,28 +400,49 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
     return E_CURSOR_POLL_BUSY;
   }
 
+  if (cursor->metadata.headers_offset > cursor->ring_file_size_bytes
+      || !cursor->metadata.ring_size_elements
+      || cursor->metadata.ring_size_elements * sizeof(uint64_t)
+         != cursor->metadata.ring_size_bytes) {
+    atomic_store(&cursor->cursor_in_poll, 0);
+    return E_CORRUPT_STREAM;
+  }
+
   /* this loop looks a bit odd because we're iterating from the last domain
      that we read from on the last read_poll call and then looping around.
      This is necessary because in the case where the consumer can't keep up
      with message production (i.e max_events is hit each time) it ensures that
      messages are read from all domains, rather than just the first. */
   for (int i = 0; i < cursor->metadata.max_domains && !early_exit; i++) {
     int domain_num = (start_domain + i) % cursor->metadata.max_domains;
+    uint64_t offset =
+      cursor->metadata.headers_offset +
+      domain_num * cursor->metadata.ring_header_size_bytes;
+    if (offset >= cursor->ring_file_size_bytes
+        || offset + cursor->metadata.ring_header_size_bytes
+           > cursor->ring_file_size_bytes) {
+      atomic_store(&cursor->cursor_in_poll, 0);
+      return E_CORRUPT_STREAM;
+    }
 
     struct runtime_events_buffer_header *runtime_events_buffer_header =
       (struct runtime_events_buffer_header *)(
-        (char *)cursor->map +
-        cursor->metadata.headers_offset +
-        domain_num * cursor->metadata.ring_header_size_bytes
+        get_map_offset(cursor, cursor->metadata.headers_offset,
+                       domain_num,
+                       cursor->metadata.ring_header_size_bytes)
       );
 
-    uint64_t *ring_ptr = (uint64_t *)((char *)cursor->map +
-                                      cursor->metadata.data_offset +
-                                      domain_num * cursor->metadata.ring_size_bytes);
+    uint64_t *ring_ptr =
+      (uint64_t *)get_map_offset(cursor, cursor->metadata.data_offset,
+                                 domain_num, cursor->metadata.ring_size_bytes);
+    if (!runtime_events_buffer_header || !ring_ptr) {
+      atomic_store(&cursor->cursor_in_poll, 0);
+      return E_CORRUPT_STREAM;
+    }
 
     do {
       uint64_t buf[RUNTIME_EVENTS_MAX_MSG_LENGTH];
-      uint64_t ring_mask, header, msg_length;
+      uint64_t ring_mask, header, msg_length, ring_masked_pos;
       ring_head = atomic_load_acquire(&runtime_events_buffer_header->ring_head);
       ring_tail = atomic_load_acquire(&runtime_events_buffer_header->ring_tail);
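Both derived pointers now go through get_map_offset and are NULL-checked before the read loop dereferences them, and the up-front metadata check rejects a ring_size_elements/ring_size_bytes mismatch. The masking further down additionally assumes ring_size_elements is a power of two, since pos & (ring_size_elements - 1) only equals pos % ring_size_elements in that case. A hypothetical extra guard a defensive consumer could apply to untrusted metadata (not part of this patch):

#include <assert.h>
#include <stdint.h>

/* `pos & (size - 1)` is only a valid modulo when size is a power of two. */
static int is_power_of_two(uint64_t n)
{
  return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
  uint64_t size = 64, pos = 130;
  assert(is_power_of_two(size));
  assert((pos & (size - 1)) == pos % size);  /* 130 % 64 == 2 */

  /* With a non-power-of-two size the mask silently indexes wrong slots. */
  uint64_t bad_size = 48;
  assert((pos & (bad_size - 1)) != pos % bad_size);
  return 0;
}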
@@ -430,16 +463,19 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
       }
 
       ring_mask = cursor->metadata.ring_size_elements - 1;
-      header = ring_ptr[cursor->current_positions[domain_num] & ring_mask];
+      ring_masked_pos = cursor->current_positions[domain_num] & ring_mask;
+      header = ring_ptr[ring_masked_pos];
       msg_length = RUNTIME_EVENTS_ITEM_LENGTH(header);
 
-      if (msg_length > RUNTIME_EVENTS_MAX_MSG_LENGTH) {
+      if (msg_length > RUNTIME_EVENTS_MAX_MSG_LENGTH
+          || ring_masked_pos + msg_length
+             > cursor->metadata.ring_size_elements) {
         atomic_store(&cursor->cursor_in_poll, 0);
         return E_CORRUPT_STREAM;
       }
 
       memcpy(buf,
-             ring_ptr + (cursor->current_positions[domain_num] & ring_mask),
+             ring_ptr + ring_masked_pos,
              msg_length * sizeof(uint64_t));
 
       atomic_thread_fence(memory_order_seq_cst);
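Caching the masked position in ring_masked_pos removes the duplicated mask computation and lets the corruption check reject a message whose claimed length would read past the end of the mapped ring, not just past the stack buffer. Since the producer pads rather than splitting an event across the ring boundary, a crossing message can only mean corruption. A sketch of the copy guard in isolation (constants and names are stand-ins, not the runtime's):

#include <assert.h>
#include <stdint.h>
#include <string.h>

enum { MAX_MSG = 16, RING_ELEMS = 64 };

/* Reject any message whose claimed length would overflow the destination
   buffer or read past the end of the mapped ring. */
static int copy_msg(uint64_t *dst, const uint64_t *ring,
                    uint64_t masked_pos, uint64_t msg_length)
{
  if (msg_length > MAX_MSG || masked_pos + msg_length > RING_ELEMS)
    return 0; /* corrupt stream */
  memcpy(dst, ring + masked_pos, msg_length * sizeof(uint64_t));
  return 1;
}

int main(void)
{
  uint64_t ring[RING_ELEMS] = {0}, buf[MAX_MSG];
  assert(copy_msg(buf, ring, 60, 4));   /* ends exactly at the ring end */
  assert(!copy_msg(buf, ring, 60, 5));  /* would read one word too far */
  assert(!copy_msg(buf, ring, 0, 17));  /* exceeds the stack buffer */
  return 0;
}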
@@ -520,6 +556,14 @@ caml_runtime_events_read_poll(struct caml_runtime_events_cursor *cursor,
       // User events
       uintnat event_id = RUNTIME_EVENTS_ITEM_ID(header);
 
+      if (cursor->metadata.custom_events_offset > cursor->ring_file_size_bytes
+          || cursor->metadata.custom_events_offset
+             + (event_id + 1) * sizeof(struct runtime_events_custom_event)
+             > cursor->ring_file_size_bytes) {
+        atomic_store(&cursor->cursor_in_poll, 0);
+        return E_CORRUPT_STREAM;
+      }
+
       struct runtime_events_custom_event *custom_event =
         &((struct runtime_events_custom_event *)
           ((char *)cursor->map + cursor->metadata.custom_events_offset))
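Finally, the custom-events table lookup is bounds-checked before indexing: the whole entry for event_id, not just its first byte, must lie inside the mapped file. Here the additive form of the comparison is safe because event_id comes out of a masked header field via RUNTIME_EVENTS_ITEM_ID and is therefore small. A self-contained sketch of the same pattern, with an invented struct layout and names, written in a division form that stays overflow-free even for an unbounded index:

#include <assert.h>
#include <stdint.h>

/* Stand-in for struct runtime_events_custom_event. */
struct custom_event { char name[128]; };

/* Return the event_id-th table entry, or NULL if any part of it would
   fall outside the mapped file. */
static const struct custom_event *
lookup_custom_event(const char *map, uint64_t file_size,
                    uint64_t table_offset, uint64_t event_id)
{
  if (table_offset > file_size
      || event_id >= (file_size - table_offset) / sizeof(struct custom_event))
    return NULL;
  return (const struct custom_event *)(map + table_offset) + event_id;
}

int main(void)
{
  static char file[4096];
  assert(lookup_custom_event(file, sizeof file, 0, 31) != NULL); /* last fit */
  assert(lookup_custom_event(file, sizeof file, 0, 32) == NULL); /* past EOF */
  return 0;
}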