-
Notifications
You must be signed in to change notification settings - Fork 25
/
z80mode.asm
3247 lines (3080 loc) · 59.1 KB
/
z80mode.asm
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
; Start of the Z80-mode (non-ADL) code image. Assembled at origin 0 so that
; the small routines below, padded with .block directives, land exactly on
; the RST entry points ($08, $10, $18, $20, $30, $38).
z80code:
.assume adl=0
.org 0
CPU_SPEED_START()
; Bitmask of currently pending interrupts; written by the PPU/event handlers
; (see set 0,(hl) / set 1,(hl) with HL=0) and consumed by interrupt dispatch.
active_ints:
.db 0
; HDMA line counter state byte (only the storage is visible in this chunk).
hdma_line_counter:
.db 0
; Sentinel byte used by wait-loop detection (storage only; logic elsewhere).
waitloop_sentinel:
.db 0
; Pad to $08 so the next routine is the RST $08 handler.
.block $08-$
; RST $08 handler: translate a Game Boy address to a direct host read pointer.
; Input: DE=Game Boy HL
; Output: UHL=direct read pointer, or implementation is patched
; Destroys: F'
r_get_hl_read_ptr:
ex af,af'
get_hl_read_ptr_swapped:
; Index the 256-entry read LUT by the high byte of the GB address.
ld h,mem_read_lut >> 8
ld l,d
ld l,(hl)
; Implicitly reset Z flag to indicate a patchable caller
; (INC H cannot set Z here since H is a LUT page number, not $FF).
inc h ;mem_get_ptr_routines
; Tail-dispatch into the per-region get-pointer routine.
jp (hl)
; Pad to $10 for the RST $10 handler.
.block $10-$
; RST $10 handler: same as above, but yields a read/write pointer via the
; write LUT (one page below the get-pointer routines, hence DEC H).
; Input: DE=Game Boy HL
; Output: UHL=direct read/write pointer, or implementation is patched
; Destroys: F'
r_get_hl_readwrite_ptr:
ex af,af'
get_hl_readwrite_ptr_swapped:
ld h,mem_write_lut >> 8
ld l,d
ld l,(hl)
; Implicitly reset Z flag to indicate a patchable caller
dec h ;mem_get_ptr_routines
jp (hl)
; Pad to $18 for the RST $18 handler.
.block $18-$
; RST $18 handler: an event trigger that was injected into JIT code by
; overwriting one byte with RST $18. Restore the original byte (event_value,
; patched in by the scheduler) at the RST's own address, then run the event.
r_event:
pop hl
dec hl
event_value = $+1
ld (hl),0
jp do_event
; Pad to $20 for the RST $20 handler.
.block $20-$
; RST $20 handler: fast-path pointer for HRAM accesses.
; Carry from (DE+1)+$7F occurs exactly when DE is in $FF80-$FFFE (HRAM),
; in which case the GB address is its own direct pointer; otherwise the
; caller is unpatched to a generic access.
r_get_hl_hram_ptr:
ex af,af'
inc de
ld hl,$007F
add hl,de
dec de
jr nc,_
ld h,d
ld l,e
ex af,af'
ret
_
jp unpatch_hl_hram_access
;.block $28-$
; Taken by previous routine
; Pad to $30 for the RST $30 handler.
.block $30-$
; RST $30 handler: CALL dispatch. IYL is the stack-window offset; when
; decrementing it stays negative (sign set) the fast path in do_call is
; taken, otherwise the push crossed the window and must shift it first.
r_call:
ex af,af'
dec iyl
jp m,do_call
jr do_push_overflow_for_call
; Pad to $38 for the RST $38 handler.
.block $38-$
; RST $38 handler: per-block cycle check. The cycle counter high byte lives
; in D' (negative, counting up); if incrementing it is still nonzero, no
; event is due and we return straight into JIT code. Otherwise an event must
; be scheduled; carry (set by the caller) selects the jump vs. subblock form
; of the inline metadata that follows/precedes the RST site.
r_cycle_check:
rst38h:
exx
inc d
exx
ret nz
; Swap the JIT return address into IX so the metadata around it can be read.
ex (sp),ix
jr nc,cycle_overflow_for_jump
inc ix
lea hl,ix
exx
inc bc ;BCU=0
ld c,a
; B = cycle count, DE = GB address, both from inline metadata before the RST.
ld b,(ix-4)
ld de,(ix-8)
#ifdef VALIDATE_SCHEDULE
call.il schedule_subblock_event_helper
#else
jp.lil schedule_subblock_event_helper
#endif
; Cycle-counter overflow at a block-ending jump: gather the jump target and
; metadata, then hand off to the ADL-mode scheduler.
cycle_overflow_for_jump:
ld hl,(ix+2)
push hl
exx
inc bc ;BCU=0
ld c,a
; B = -3 - (cycle byte): adjusted block cycle length for the jump form.
ld a,-3
sub (ix-3)
ld b,a
ld de,(ix+4)
pop ix
#ifdef VALIDATE_SCHEDULE
call.il schedule_jump_event_helper_adjusted
#else
jp.lil schedule_jump_event_helper_adjusted
#endif
; Slow path for CALL when the shadow-stack push cannot be used directly.
do_call_no_shadow_stack:
push bc
do_push_overflow_for_call_slow:
push hl
ld e,6 ; Cycles for taken CALL
exx
push hl
; Perform the Game Boy stack push through the generic (slow) memory path.
call do_push_for_call_slow_swap
ex af,af'
pop hl
exx
pop hl
pop bc
; Remove JIT return address from callstack cache, while preserving
; the Game Boy return address in HL
inc sp
inc sp
; Count cycles for taken CALL
add a,c
jr nc,do_call_dispatch
inc d
jr nz,do_call_dispatch
; Cycle counter overflowed exactly here: schedule an event instead.
ex de,hl
ld l,a
ld a,c
jr cycle_overflow_for_call_pushed
; Callstack limit exceeded during CALL: reset the callstacks, then continue
; with carry set (callstack-cache-used indicator for the decoder).
do_call_callstack_overflow:
call callstack_overflow_helper
scf
jr do_call_dispatch_push
#ifdef SHADOW_STACK
; Re-establish the shadow stack mapping, then retry the fast push.
do_call_set_shadow_stack:
call set_shadow_stack
jr do_call_shadow_stack_smc
#endif
; The push crossed below the current stack window: try shifting the window
; down; if that fails (carry), fall back to the fully slow push.
do_push_overflow_for_call:
push bc
push hl
exx
push hl
call shift_stack_window_lower
pop hl
exx
pop hl
jr c,do_push_overflow_for_call_slow
pop bc
; Fast CALL path: push the GB return address (in HL) to the emulated stack
; via the shadow-stack window (IY), cache it on the host callstack, count
; cycles, then dispatch to the JIT target. The ld.l offsets are SMC-patched
; (do_call_push_offset_smc_*), as is the whole entry (shadow_stack_smc).
do_call:
do_call_shadow_stack_smc = $
; Push Game Boy return address to the stack
do_call_push_offset_smc_1 = $+3
ld.l (iy),h
dec iyl
do_call_push_offset_smc_2 = $+3
ld.l (iy),l
push.l hl ; Cache Game Boy return address on callstack
; Count cycles for taken CALL
add a,c
jr c,do_call_maybe_cycle_overflow
do_call_no_cycle_overflow:
; Check for callstack overflow
ld hl,(-call_stack_lower_bound) & $FFFF
add hl,sp
jr nc,do_call_callstack_overflow
do_call_dispatch_push:
; Push RET cycle count and stack offset
ld c,iyl
push bc
do_call_dispatch:
; Dispatch to JIT target
; Carry is set to indicate to the decoder that the callstack cache was used,
; or reset if not. This indicates how to retrieve the JIT address.
exx
ex af,af'
jp (hl)
; CALL into banked ROM: the JIT call site is followed by inline metadata
; (RET cycles, CALL cycles, GB return address, expected ROM bank). If the
; currently mapped bank differs from the recorded one, the target must be
; re-resolved by the ADL-mode mismatch helper.
do_rom_bank_call:
exx
ex af,af'
pop hl
ld e,a
ld b,(hl) ; Cycles for taken RET
inc hl
banked_call_mismatch_continue:
ld c,(hl) ; Cycles for taken CALL
inc hl
push de
ld de,(hl) ; Game Boy return address
inc hl
inc hl
curr_rom_bank = $+1
ld a,0 ; Get current bank
cp (hl)
jr nz,banked_call_mismatch
inc hl
; Swap: stack gets the JIT target pointer, HL gets the GB return address.
ex (sp),hl
ex de,hl
ld a,e
; Same window check as r_call: fast path while IYL stays negative.
dec iyl
jp m,do_call
jp do_push_overflow_for_call
banked_call_mismatch:
jp.lil banked_call_mismatch_helper
; CALL into non-banked memory: same metadata layout minus the bank byte
; (one extra INC HL skips the RST_CALL byte instead).
do_unbanked_call:
exx
ex af,af'
pop hl
ld b,(hl) ; Cycles for taken RET
inc hl
ld c,(hl) ; Cycles for taken CALL
inc hl
push de
ld de,(hl) ; Game Boy return address
inc hl
inc hl
inc hl ; Skip RST_CALL
ex (sp),hl
ex de,hl
dec iyl
jp m,do_call
jp do_push_overflow_for_call
; The 8-bit cycle add carried; only a true overflow (D' wrapping to zero)
; requires scheduling an event, otherwise resume the normal CALL path.
do_call_maybe_cycle_overflow:
inc d
jr nz,do_call_no_cycle_overflow
cycle_overflow_for_call:
ex de,hl
; Check for callstack overflow
ld hl,(-call_stack_lower_bound) & $FFFF
add hl,sp
call nc,callstack_overflow_helper
ld l,a
ld a,c
; Push RET cycle count and stack offset
ld c,iyl
push bc
cycle_overflow_for_call_pushed:
exx
push hl
exx
ex (sp),ix
inc bc ;BCU=0
ld c,l
; Remove the 6 cycles of the taken CALL from the block cycle count.
sub 6
ld b,a
; Step DE back to the CALL instruction's own GB address.
dec de
dec de
#ifdef VALIDATE_SCHEDULE
call.il schedule_call_event_helper
#else
jp.lil schedule_call_event_helper
#endif
; Conditional CALL dispatch stubs. Each condition has two entry points:
; the instruction *before* the label is the entry for banked-ROM targets
; (condition true -> do_rom_bank_call), and the label itself is the entry
; for unbanked targets (condition false -> skip_cond_call).
jr nz,do_rom_bank_call
do_call_nz:
jr z,skip_cond_call
jr do_unbanked_call
jr z,do_rom_bank_call
do_call_z:
jr nz,skip_cond_call
jr do_unbanked_call
jr nc,do_rom_bank_call
do_call_nc:
jr c,skip_cond_call
jr do_unbanked_call
jr c,do_rom_bank_call
do_call_c:
jr c,do_unbanked_call
; Condition false: skip over the 5 inline metadata bytes at the call site
; and count the untaken-call cycles (the +1 is compensated by DEC A below).
skip_cond_call:
pop hl
ex af,af'
add a,(hl) ; Count cycles for taken RET (1 too many)
inc hl \ inc hl \ inc hl \ inc hl \ inc hl
jr c,++_
_
dec a
ex af,af'
; Resume JIT code just past the inline metadata.
jp (hl)
_
; Possible cycle overflow: Z here means the add produced exactly zero,
; otherwise bump D' and only schedule when it wraps to zero.
jr z,--_
exx
inc d
exx
jr nz,--_
dec a
push hl
exx
ex (sp),ix
inc bc ;BCU=0
ld c,a
; Recover block cycles and GB address from the inline metadata.
ld a,(ix-5)
sub 4
ld b,a
ld de,(ix-3)
#ifdef VALIDATE_SCHEDULE
call.il schedule_event_helper
#else
jp.lil schedule_event_helper
#endif
; Cycle check at a JIT "bridge" (block-to-block link): return to the JIT
; code unless the cycle counter high byte wrapped; in that case read the
; bridge metadata (cycles, GB address, JIT target) and schedule an event.
cycle_overflow_for_bridge:
exx
inc d
exx
ret nz
exx
ex (sp),ix
ld bc,(ix-4-1) ; BCU=0
ld c,a
ld de,(ix+4)
ld ix,(ix+2)
exx
lea hl,ix
exx
#ifdef VALIDATE_SCHEDULE
call.il schedule_event_helper_a
#else
jp.lil schedule_event_helper_a
#endif
; Jump whose cycle count is stored inline after the JIT call site: add the
; cycles, skip the metadata, and continue unless the counter carried.
do_overlapped_jump:
ex af,af'
pop hl
inc hl
add a,(hl)
inc hl
inc hl
jr c,do_overlapped_jump_maybe_overflow
ex af,af'
jp (hl)
; Jump into banked ROM: verify the recorded bank byte against the current
; bank (SMC-patched at rom_bank_check_smc_1); on mismatch re-resolve via
; the ADL-mode helper, otherwise count the inline cycles and continue.
do_rom_bank_jump:
ex af,af'
exx
ld e,a
exx
pop hl
rom_bank_check_smc_1 = $+1
ld a,0
cp (hl)
jr nz,banked_jump_mismatch
inc hl
ld a,(hl)
banked_jump_mismatch_continue:
inc hl
inc hl
exx
add a,e
jr c,++_
_
exx
ex af,af'
jp (hl)
; Carry from the cycle add: only schedule when D' wraps to zero.
do_overlapped_jump_maybe_overflow:
exx
_
inc d
jr nz,--_
; Common scheduling path for slow jumps: gather cycle counts and the GB
; address / JIT target from the inline metadata, then enter the scheduler.
do_slow_jump_overflow_common:
ld e,a
exx
inc hl
push hl
ld hl,(hl)
exx
ex (sp),ix
ld bc,(ix-3) ;BCU=0
; B += C, then restore: leaves B = total cycles, preserves flags' carry use.
ld a,c
add a,b
ld b,a
sub c
ld c,e
ld de,(ix+2)
ld ix,(ix)
#ifdef VALIDATE_SCHEDULE
call.il c,schedule_slow_jump_event_helper
call.il schedule_event_helper
#else
jp.lil c,schedule_slow_jump_event_helper
jp.lil schedule_event_helper
#endif
banked_jump_mismatch:
; Save the new bank index
ld (hl),a
jp.lil banked_jump_mismatch_helper
; Check if an event was scheduled at or before the current memory cycle
; Inputs: DE = cycle count at end of block (only call when D=0)
; C = block-relative cycle offset (negative)
; I = time of next event
; Outputs: DE = updated cycle count at end of block
; I = updated time of next event
; Destroys: AF, B, HL
handle_events_for_mem_access:
ld a,e
add a,c
; No carry: the memory access happens before the next event; nothing to do.
ret nc
; Save and override the terminating event counter checker, preventing interrupt dispatch
ld hl,(event_counter_checkers_ei_delay)
push hl
ld hl,event_expired_for_mem_access_loop
ld (event_counter_checkers_ei_delay),hl
push ix
; Save the cycle remainder in IX
ld ixl,a
ld ixh,d
; Advance the cycle offsets to after the current cycle
cpl
ld hl,event_cycle_count
add a,(hl)
ld (hl),a
ASSERT_C
ld a,c
cpl
ld e,a
; Enter the main event loop with the adjusted offsets.
jr do_event_pushed
; Terminator installed above: runs after the counter checkers instead of
; dispatching interrupts, looping while events still precede the access.
event_expired_for_mem_access_loop:
ld sp,(event_save_sp)
ld h,b
ld l,c
; Check if there are more events before the memory access
add ix,de
jr c,event_expired_more_events
; Advance the next event time to after the current cycle
sbc hl,de
lea de,ix+1
add hl,de
ld i,hl
pop de
; Restore the memory cycle offset
ld a,e
cpl
ld c,a
pop ix
pop hl
; Restore the terminating event counter checker
ld (event_counter_checkers_ei_delay),hl
ret
; Entry point when emulation (re)starts: set up the GB stack mapping and
; enter the not-expired path with the saved JIT target.
start_emulation:
call set_gb_stack
pop hl
exx
ex af,af'
jr event_not_expired_start_emulation
#ifdef DEBUG
; Dummy GB address recorded for debug when an event fires (see below).
event_debug_address:
.db 0
#endif
; Main event entry (reached from r_event): convert the cycle counter into
; the block-relative remainder (E) using the SMC-patched event_cycle_count,
; then fall into the generic event path.
do_event:
exx
#ifdef DEBUG
ld hl,event_debug_address
ld (event_address),hl
#endif
ex af,af'
event_cycle_count = $+1
ld l,0
sub l
ASSERT_NC
ld e,a
ld h,d
do_event_any:
#ifdef DEBUG
; Debug trap: H must be zero here (cycle high byte consumed); spin if not.
inc h
dec h
jr nz,$
#endif
do_event_any_noassert:
; Save the caller's IX and stash HL (JIT target) in its place on the stack.
push hl
ex (sp),ix
#ifdef VALIDATE_STACK
; Debug-only consistency check between the ADL and Z80 stack pointers;
; spins in place on mismatch.
ld hl,($AABD - myADLstack) & $FFFF
add.l hl,sp
mlt hl
ld l,h
add hl,hl
ld h,(myz80stack - 4) >> 8
sbc hl,sp
jr nz,$
#endif
do_event_pushed:
push de
#ifdef SCHEDULER_LOG
call.il scheduler_log
#endif
#ifdef 0
; Disabled fast-log tracing block (kept for debugging).
push iy
ex af,af'
exx
push af
push bc
push de
push hl
ex af,af'
exx
ld hl,(event_gb_address)
push hl
ld hl,i
push hl
FASTLOG_EVENT_Z80(TRIGGER_EVENT, 18)
dec sp \ dec sp \ dec sp \ dec sp
#endif
; Event counter-checker loop. Trick: SP is pointed at event_counter_checkers,
; a table of checker routine addresses, so each plain RET "returns" into the
; next checker in the chain. Each checker compares its counter (BC = time of
; next event) and either jumps to its expiry handler or adjusts DE (delta to
; the soonest counter) and RETs to the next checker.
event_expired_interrupt_loop:
; Check scheduled events
ld (event_save_sp),sp
event_expired_halt_loop:
ld hl,i ; This clears the carry flag
event_expired_loop:
ld sp,event_counter_checkers
ld b,h
ld c,l
ppu_counter = $+1
ld de,0
; HL + PPU counter: zero means the PPU event is due exactly now.
adc hl,de
ret z
ex de,hl
ppu_scheduled:
; Skip the next checker-table entry (consumed dispatch slot).
inc sp
inc sp
audio_counter_checker:
audio_counter = $+1
ld hl,0
or a
sbc hl,bc
jp z,audio_expired_handler
; Keep DE = minimum remaining delta across counters.
add hl,de
ret c
ex de,hl
sbc hl,de
ex de,hl
ret
event_expired_more_events:
or a
sbc hl,de
or a
jr event_expired_loop
; Halted CPU with no event remainder: re-run the checker chain.
cpu_continue_halt:
ld ix,0
jr event_expired_halt_loop
; EI has a one-instruction delay on the Game Boy. Because SP points into the
; checker table here, the CALL below overwrites the next table entry with the
; address of the code following it — so after one GB cycle the chain runs
; schedule_ei_delay_startup, which actually enables interrupts.
schedule_ei_delay:
; Force an event after one GB cycle
ld de,-1
; Overwrite the function pointer with the following code,
; which will run after the one GB cycle elapses
call event_counter_checkers_done
schedule_ei_delay_startup:
; Enable interrupts, but check for halt spin
; Interrupts may have been enabled by RETI so check specifically for halt mode
ld hl,intstate_smc_2
ld a,(hl)
cp cpu_exit_halt_no_interrupt - (intstate_smc_2 + 1)
ld (hl),trigger_interrupt - (intstate_smc_2 + 1)
jr nz,_
ld (hl),cpu_exit_halt_trigger_interrupt - (intstate_smc_2 + 1)
_
; Restore the default counter checker end pointer
call event_counter_checkers_done
; Terminator of the checker chain: commit the new next-event time to I,
; or loop again if more events already expired (carry from ADD IX,DE).
event_counter_checkers_done:
#ifdef VALIDATE_SCHEDULE
;ld de,-1
#endif
ld h,b
ld l,c
add ix,de
jr c,event_expired_more_events
sbc hl,de
ld i,hl
event_save_sp = $+1
; Use this initial value in case the CPU is halted when loading a save state
ld sp,myz80stack-4-4
; No event due: check for pending+enabled interrupts, then either resume the
; JIT code or reschedule. The JR at intstate_smc_2 is SMC-patched: its offset
; byte selects trigger_interrupt, the halt-exit variants, or 0 (fall through,
; interrupts disabled).
event_not_expired_start_emulation:
pop de
event_not_expired:
; HL = IF/IE pair; A = interrupts both requested and enabled.
ld hl,(IE)
ld a,l
and h
intstate_smc_2 = $+1
jr nz,trigger_interrupt
; SMC site: replaced with a JR while halted, restored to ADD IX,DE ($19DD)
; when the CPU leaves halt (see cpu_exit_halt_no_interrupt).
cpu_halted_smc = $
add ix,de
ld a,ixl
jr c,event_reschedule
event_no_reschedule:
ld d,ixh
pop ix
exx
ex af,af'
jp (hl)
; Leave halt with interrupts disabled: clear the interrupt-trigger SMC and
; restore the ADD IX,DE at cpu_halted_smc, then re-run it.
cpu_exit_halt_no_interrupt:
xor a
ld (intstate_smc_2),a
ld hl,$19DD ; ADD IX,DE
ld (cpu_halted_smc),hl
jr cpu_halted_smc
; The remaining cycles overflow the counter: push the state into the
; ADL-mode scheduler to pick a later event time.
event_reschedule:
inc bc ;BCU=0
ld b,e
ld c,a
ld de,(event_gb_address)
exx
push hl
pop ix
exx
; This is guaranteed to carry, so the event cannot be now
sub b
#ifdef VALIDATE_SCHEDULE
call.il schedule_event_later
#else
jp.lil schedule_event_later
#endif
; Callstack limit hit while pushing an interrupt frame: reset callstacks.
trigger_int_callstack_overflow:
call callstack_overflow_helper
jr trigger_int_callstack_overflow_continue
; Retry entry used when an event interrupted the first 4 dispatch cycles
; (installed into intstate_smc_2 by dispatch_int_handle_events).
trigger_interrupt_retry_dispatch:
; Count the full dispatch cycles again, without causing another retry
lea de,ix-4
; Skip the push and the first SMC for disabling interrupts, which have
; already been done
ld l,a
; Restore Game Boy BC
pop ix
jr trigger_interrupt_pushed
#ifdef SHADOW_STACK
do_push_for_interrupt_set_shadow_stack:
call set_shadow_stack
jr do_push_for_interrupt_continue
#endif
; Leaving halt via an interrupt: restore ADD IX,DE at the halt SMC first.
cpu_exit_halt_trigger_interrupt:
ld bc,$19DD ; ADD IX,DE
ld (cpu_halted_smc),bc
; Dispatch an interrupt. L = pending-and-enabled mask, H = IF.
trigger_interrupt:
ld l,a
; Disable interrupts
ld a,$08 ;EX AF,AF'
ld (intstate_smc_1),a
; Get the number of cycles to be taken by RET
rrca ;ld a,4
add a,e
; Same shadow-stack window check as CALL.
dec iyl
jp p,do_push_overflow_for_interrupt
do_push_for_interrupt_continue:
; BC = GB address of the interrupted instruction (SMC-updated operand).
event_gb_address = $+1
ld bc,event_gb_address
do_push_for_interrupt_shadow_stack_smc = $
trigger_interrupt_push_offset_smc_1 = $+3
ld.l (iy),b
dec iyl
trigger_interrupt_push_offset_smc_2 = $+3
ld.l (iy),c
push.l bc ; Cache Game Boy return address on callstack
; Restore cycle count into DE
lea de,ix
; Restore Game Boy BC
pop ix
exx
; Push JIT return address
push hl
; Check for callstack overflow
ld hl,(-call_stack_lower_bound) & $FFFF
add hl,sp
jr nc,trigger_int_callstack_overflow
trigger_int_callstack_overflow_continue:
exx
; Push stack offset and RET cycle count
ld b,a
ld c,iyl
push bc
trigger_interrupt_pushed:
; More disabling interrupts
xor a
ld (intstate_smc_2),a
; Get the lowest set bit of the active interrupts
sub l
and l
; Clear the IF bit
xor h
ld b,a
; Index the dispatch routines by the interrupt bit times 4
xor h
add a,a
add a,a
; Save the new IF value
sbc hl,hl ;active_ints
ld (hl),b
exx
; HL' = entry in the dispatch table page (dispatch_vblank's page).
ld l,a
ld h,dispatch_vblank >> 8
; Get number of cycles to be taken
ld a,(hl)
inc hl
exx
add a,e
jr c,dispatch_int_maybe_overflow
dispatch_int_no_overflow:
exx
ex af,af'
jp (hl)
; Interrupt push crossed the stack window: shift the window down and retry,
; or fall back to the fully slow push path.
do_push_overflow_for_interrupt:
push hl
ld d,a ; Preserve the RET cycle count
call shift_stack_window_lower_preserved_a_swapped
exx
ld a,d
pop hl
jr nc,do_push_for_interrupt_continue
do_push_for_interrupt_no_shadow_stack:
; Restore cycle count into DE and restore Game Boy BC
ex (sp),ix
pop de
ld a,e
ld hl,(event_gb_address)
ld e,5 ; Cycles for interrupt dispatch
call do_push_for_call_slow
ex af,af'
exx
ld e,a
; Re-check the active interrupts
; (the slow push may itself have changed IF/IE state).
ld hl,(IE)
ld a,l
and h
ld l,a
jr trigger_interrupt_pushed
; Cycle add carried while counting dispatch cycles: overflow only if the
; counter high byte wraps to zero.
dispatch_int_maybe_overflow:
inc d
jr nz,dispatch_int_no_overflow
ld c,a
; Check if an event was scheduled during the first 4 cycles of dispatch
ld a,e
sub -4
jr nc,dispatch_int_handle_events
; No: schedule an event at the interrupt vector's JIT target instead.
cpl
add a,c
ld b,a
inc bc \ dec bc ;BCU=0
exx
ld a,l
inc hl
ld hl,(hl)
push hl
exx
ex (sp),ix
; Recover the dispatch-table index ((index*4*10 range) -> GB vector entry).
add a,10*4-1
rra
ld l,a
ld h,dispatch_vblank >> 8
ld de,(hl)
#ifdef VALIDATE_SCHEDULE
call.il schedule_event_helper
#else
jp.lil schedule_event_helper
#endif
; An event fell inside the first 4 dispatch cycles: undo the IF change,
; arrange a retry of the dispatch, and re-enter the event loop.
dispatch_int_handle_events:
; Set IX to the 4-cycle-added value
push ix
ld ixl,a
ld ixh,0
; Restore the original value of IF
exx
ld a,l
exx
dec a
rrca
rrca
xor b
ld (active_ints),a
; Set the restoring interrupt trigger
ld a,trigger_interrupt_retry_dispatch - (intstate_smc_2 + 1)
ld (intstate_smc_2),a
; SP may have been adjusted by the callstack push, so set the new SP restore value
push de
jp event_expired_interrupt_loop
; This is called when a CALL, RST, or interrupt occurs
; which exceeds the defined callstack limit.
; Inputs: SPL = myADLstack - (CALL_STACK_DEPTH * CALL_STACK_ENTRY_SIZE_ADL) - 3
; (SPL) = value to preserve on ADL callstack
; SPS = myz80stack - 4 - (CALL_STACK_DEPTH * CALL_STACK_ENTRY_SIZE_Z80) - 4
; (SPS) = return value
; (SPS+2) = value to preserve on Z80 callstack
; Outputs: SPL = myADLstack
; SPS = myz80stack - 4 - 2
; (SPS) = preserved Z80 callstack value
; Destroys: HL
callstack_overflow_helper:
; Reset the ADL-mode callstack, keeping its topmost value.
pop.l hl
ld.lil sp,myADLstack
push.l hl
; Pop our own return address and stash it via SMC in the JP below,
; since both stacks are about to be reset.
pop hl
ld (callstack_overflow_helper_smc),hl
#ifdef FASTLOG
push af
push hl
FASTLOG_EVENT_Z80(CALLSTACK_OVERFLOW, 2)
pop af
#endif
; Reset the Z80-mode callstack, keeping the value to preserve.
pop hl
ld sp,myz80stack - 4
push hl
callstack_overflow_helper_smc = $+1
jp 0
; PPU mode 2 (OAM scan) expiry handlers. These are entered via RET from the
; checker chain; the "push hl / inc sp / inc sp" idiom rewrites the table
; entry that was just popped so the next expiry re-enters at ppu_expired_mode2.
ppu_mode2_line_0_lyc_match:
; The LYC match bit was already set by the scheduled LYC event
; Just transition from mode 1 to mode 2
inc a
ld (hl),a
; Check for mode 1 or LYC blocking
tst a,$50
jr nz,ppu_mode2_continue
; Request a STAT interrupt (active_ints is at address 0).
sbc hl,hl ;ld hl,active_ints
set 1,(hl)
dec h
jr ppu_mode2_continue
; Mode 2 expiry on line 0 (wrap from vblank).
ppu_expired_mode2_line_0:
ld hl,ppu_expired_mode2
push hl
inc sp
inc sp
; Check if LYC is 0
ld hl,LYC
ld a,h ;$FF
ld (ppu_mode2_LY),a
and (hl)
ld l,STAT & $FF
ld a,(hl)
jr z,ppu_mode2_line_0_lyc_match
; Check for mode 1 blocking
bit 4,a
jr nz,ppu_mode2_blocked_fast
ppu_mode2_not_blocked:
sbc hl,hl ;ld hl,active_ints
ppu_expired_mode2:
; Request STAT interrupt
set 1,(hl) ;active_ints
; Set mode 2
ld hl,STAT
ppu_mode2_blocked:
ld a,(hl)
ppu_mode2_blocked_fast:
and $F8
or 2
ld (hl),a
ppu_mode2_continue:
; Allow catch-up rendering if this frame is not skipped
; (SMC toggles this LD R,A; presumably the catch-up enable flag — verify).
ppu_mode2_enable_catchup_smc = $+1
ld r,a
; Schedule the STAT update after mode 2's duration, and LY/ppu_counter
; one full scanline later (constants are CPU-speed adjusted via SMC).
CPU_SPEED_IMM8($+1)
ld l,-MODE_2_CYCLES
add hl,de
ld (nextupdatecycle_STAT),hl
CPU_SPEED_IMM8($+1)
ld hl,-CYCLES_PER_SCANLINE
ex de,hl
add hl,de
ld (nextupdatecycle_LY),hl
ld (ppu_counter),hl
; Advance the cached LY (SMC-held) and the LY register.
ppu_mode2_LY = $+1
ld a,0
inc a
ld (ppu_mode2_LY),a
ld (LY),a
; Continue through the checker chain unless this line is the event line.
ppu_mode2_event_line = $+1
cp 0
jp nz,audio_counter_checker
; Check whether vblank should be handled immediately
cp VBLANK_SCANLINE
jr z,ppu_mode2_handle_vblank
; Check whether LYC actually matched this line
ld hl,LYC
cp (hl)
jr nz,ppu_mode2_lyc_mismatch
; Set LYC coincidence bit
ld l,STAT & $FF
set 2,(hl)
; Record the successful LYC match
ld (last_lyc_match),a
; Get the next prediction and set the event line
ld.lil hl,lyc_prediction_list
ld l,a
ld.l a,(hl)
ld (ppu_mode2_event_line),a
ld (writeLYC_event_line_smc),a
call ppu_scheduled
; Mode 2 expiry when the previous line may still assert the LYC STAT source:
; if LYC changed or the LYC interrupt enable (STAT bit 6) is off, the STAT
; interrupt is not blocked and is requested as usual.
ppu_expired_mode2_maybe_lyc_block:
ld hl,ppu_expired_mode2
push hl
inc sp
inc sp
; Check if LYC is still blocking
ld a,(last_lyc_match)
ld hl,LYC
xor (hl)
jr nz,ppu_mode2_not_blocked
ld l,STAT & $FF
bit 6,(hl)
jr z,ppu_mode2_not_blocked
jr ppu_mode2_blocked
; Line reached VBLANK_SCANLINE: raise the vblank interrupt instead.
ppu_mode2_handle_vblank:
; Trigger vblank interrupt, STAT interrupt is blocked
sbc hl,hl ;ld hl,active_ints
set 0,(hl)
; DE = elapsed delta relative to BC (current counter base).
sbc hl,bc
ex de,hl
jp ppu_expired_vblank_continue
; Event line no longer matches LYC: pick the next event line.
ppu_mode2_lyc_mismatch:
; Set the new event line to either LYC or vblank, whichever is sooner
ld a,VBLANK_SCANLINE
jr nc,_
cp (hl)
jr c,_
ld a,(hl)
_
ld (ppu_mode2_event_line),a
ld (writeLYC_event_line_smc),a
; Reset the prediction for the last successful LYC match
ld.lil hl,(z80codebase+last_lyc_match)
ld.l (hl),a
; Schedule non-blocked mode 2 event
ld hl,ppu_expired_mode2
push hl
jp ppu_scheduled
ppu_expired_mode0_line_0:
xor a
ld (ppu_mode0_LY),a
ld hl,LYC
or (hl)
jr z,ppu_expired_mode0_maybe_lyc_block
ld hl,ppu_expired_mode0
push hl
inc sp
inc sp
sbc hl,hl ;ld hl,active_ints
ppu_expired_mode0:
; Request STAT interrupt
set 1,(hl) ;active_ints
; Set mode 0
ld hl,STAT
ld a,(hl)
and $F8
ld (hl),a
ppu_mode0_blocked:
; Allow catch-up rendering if this frame is not skipped
ppu_mode0_enable_catchup_smc = $+1
ld r,a
CPU_SPEED_IMM8($+1)
ld l,-MODE_0_CYCLES
add hl,de
ld (nextupdatecycle_STAT),hl
ld (nextupdatecycle_LY),hl
CPU_SPEED_IMM8($+1)
ld hl,-CYCLES_PER_SCANLINE
ex de,hl
add hl,de
ld (ppu_counter),hl
ppu_mode0_LY = $+1
ld a,0
ld (LY),a
inc a