@@ -493,7 +493,7 @@ static noinline void start_vram_transfer(struct psx_gpu *gpu, uint32_t pos_word,
     uint32_t size_word, int is_read)
 {
   if (gpu->dma.h)
-    log_anomaly("start_vram_transfer while old unfinished\n");
+    log_anomaly(gpu, "start_vram_transfer while old unfinished\n");
 
   gpu->dma.x = pos_word & 0x3ff;
   gpu->dma.y = (pos_word >> 16) & 0x1ff;
@@ -511,7 +511,7 @@ static noinline void start_vram_transfer(struct psx_gpu *gpu, uint32_t pos_word,
     gpu->state.last_vram_read_frame = *gpu->state.frame_count;
   }
 
-  log_io("start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
+  log_io(gpu, "start_vram_transfer %c (%d, %d) %dx%d\n", is_read ? 'r' : 'w',
     gpu->dma.x, gpu->dma.y, gpu->dma.w, gpu->dma.h);
   if (gpu->gpu_state_change)
     gpu->gpu_state_change(PGS_VRAM_TRANSFER_START);
@@ -532,7 +532,7 @@ static void finish_vram_transfer(struct psx_gpu *gpu, int is_read)
     not_dirty |= dma_r - gpu->screen.src_x - 1;
     not_dirty |= dma_b - gpu->screen.src_y - 1;
     not_dirty >>= 31;
-    log_io("dma %3d,%3d %dx%d scr %3d,%3d %3dx%3d -> dirty %d\n",
+    log_io(gpu, "dma %3d,%3d %dx%d scr %3d,%3d %3dx%3d -> dirty %d\n",
       gpu->dma_start.x, gpu->dma_start.y, gpu->dma_start.w, gpu->dma_start.h,
       gpu->screen.src_x, gpu->screen.src_y, gpu->screen.hres, gpu->screen.vres, !not_dirty);
     gpu->state.fb_dirty |= !not_dirty;
@@ -715,7 +715,7 @@ static noinline int do_cmd_buffer(struct psx_gpu *gpu, uint32_t *data, int count
       if (cmd == 2)
         break;
       if (cmd == 0x1f)
-        log_anomaly("irq1?\n");
+        log_anomaly(gpu, "irq1?\n");
       pos++;
       continue;
     }
@@ -764,19 +764,19 @@ void GPUwriteDataMem(uint32_t *mem, int count)
 {
   int dummy = 0, left;
 
-  log_io("gpu_dma_write %p %d\n", mem, count);
+  log_io(&gpu, "gpu_dma_write %p %d\n", mem, count);
 
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer(&gpu);
 
   left = do_cmd_buffer(&gpu, mem, count, &dummy, &dummy);
   if (left)
-    log_anomaly("GPUwriteDataMem: discarded %d/%d words\n", left, count);
+    log_anomaly(&gpu, "GPUwriteDataMem: discarded %d/%d words\n", left, count);
 }
 
 void GPUwriteData(uint32_t data)
 {
-  log_io("gpu_write %08x\n", data);
+  log_io(&gpu, "gpu_write %08x\n", data);
   gpu.cmd_buffer[gpu.cmd_len++] = HTOLE32(data);
   if (gpu.cmd_len >= CMD_BUFFER_LEN)
     flush_cmd_buffer(&gpu);
@@ -795,7 +795,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer(&gpu);
 
-  log_io("gpu_dma_chain\n");
+  log_io(&gpu, "gpu_dma_chain\n");
   addr = ld_addr = start_addr & 0xffffff;
   for (count = 0; (addr & 0x800000) == 0; count++)
   {
@@ -808,11 +808,11 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
     if (len > 0)
       cpu_cycles_sum += 5 + len;
 
-    log_io(".chain %08lx #%d+%d %u+%u\n",
+    log_io(&gpu, ".chain %08lx #%d+%d %u+%u\n",
       (long)(list - rambase) * 4, len, gpu.cmd_len, cpu_cycles_sum, cpu_cycles_last);
     if (unlikely(gpu.cmd_len > 0)) {
       if (gpu.cmd_len + len > ARRAY_SIZE(gpu.cmd_buffer)) {
-        log_anomaly("cmd_buffer overflow, likely garbage commands\n");
+        log_anomaly(&gpu, "cmd_buffer overflow, likely garbage commands\n");
         gpu.cmd_len = 0;
       }
       memcpy(gpu.cmd_buffer + gpu.cmd_len, list + 1, len * 4);
@@ -826,7 +826,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
       if (left) {
         memcpy(gpu.cmd_buffer, list + 1 + len - left, left * 4);
         gpu.cmd_len = left;
-        log_anomaly("GPUdmaChain: %d/%d words left\n", left, len);
+        log_anomaly(&gpu, "GPUdmaChain: %d/%d words left\n", left, len);
       }
     }
 
@@ -835,7 +835,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
       break;
     }
     if (addr == ld_addr) {
-      log_anomaly("GPUdmaChain: loop @ %08x, cnt=%u\n", addr, count);
+      log_anomaly(&gpu, "GPUdmaChain: loop @ %08x, cnt=%u\n", addr, count);
       break;
     }
     if (count == ld_count) {
@@ -856,7 +856,7 @@ long GPUdmaChain(uint32_t *rambase, uint32_t start_addr,
 
 void GPUreadDataMem(uint32_t *mem, int count)
 {
-  log_io("gpu_dma_read %p %d\n", mem, count);
+  log_io(&gpu, "gpu_dma_read %p %d\n", mem, count);
 
   if (unlikely(gpu.cmd_len > 0))
     flush_cmd_buffer(&gpu);
@@ -879,7 +879,7 @@ uint32_t GPUreadData(void)
     ret = LE32TOH(ret);
   }
 
-  log_io("gpu_read %08x\n", ret);
+  log_io(&gpu, "gpu_read %08x\n", ret);
   return ret;
 }
 
@@ -891,7 +891,7 @@ uint32_t GPUreadStatus(void)
     flush_cmd_buffer(&gpu);
 
   ret = gpu.status;
-  log_io("gpu_read_status %08x\n", ret);
+  log_io(&gpu, "gpu_read_status %08x\n", ret);
   return ret;
 }
 