dcache.vhdl (forked from antonblanchard/microwatt) · 1765 lines (1607 loc) · 68.6 KB
--
-- Set-associative write-through dcache
--
library ieee;
use ieee.std_logic_1164.all;
use ieee.numeric_std.all;
library work;
use work.utils.all;
use work.common.all;
use work.helpers.all;
use work.wishbone_types.all;
entity dcache is
generic (
-- Line size in bytes
LINE_SIZE : positive := 64;
-- Number of lines in a set
NUM_LINES : positive := 32;
-- Number of ways
NUM_WAYS : positive := 4;
-- L1 DTLB number of sets (entries per way)
TLB_SET_SIZE : positive := 64;
-- L1 DTLB number of ways (associativity)
TLB_NUM_WAYS : positive := 2;
-- L1 DTLB log_2(page_size)
TLB_LG_PGSZ : positive := 12;
-- Non-zero to enable log data collection
LOG_LENGTH : natural := 0
);
port (
clk : in std_ulogic;
rst : in std_ulogic;
d_in : in Loadstore1ToDcacheType;
d_out : out DcacheToLoadstore1Type;
m_in : in MmuToDcacheType;
m_out : out DcacheToMmuType;
snoop_in : in wishbone_master_out := wishbone_master_out_init;
stall_out : out std_ulogic;
wishbone_out : out wishbone_master_out;
wishbone_in : in wishbone_slave_out;
events : out DcacheEventType;
log_out : out std_ulogic_vector(19 downto 0)
);
end entity dcache;
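--
-- Illustrative instantiation sketch (editor addition, not part of the original
-- design): it simply restates the generic and port interface declared above.
-- The surrounding signal names (core_clk, ls_to_dc, ...) are placeholders a
-- parent core would provide, and the generic values shown are just the
-- defaults; snoop_in and LOG_LENGTH are left at their declared defaults.
--
--   dcache_0 : entity work.dcache
--     generic map (
--       LINE_SIZE => 64,
--       NUM_LINES => 32,
--       NUM_WAYS  => 4
--     )
--     port map (
--       clk          => core_clk,
--       rst          => core_rst,
--       d_in         => ls_to_dc,
--       d_out        => dc_to_ls,
--       m_in         => mmu_to_dc,
--       m_out        => dc_to_mmu,
--       stall_out    => dc_stall,
--       wishbone_out => dc_wb_out,
--       wishbone_in  => dc_wb_in,
--       events       => dc_events,
--       log_out      => dc_log
--     );
--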
architecture rtl of dcache is
-- BRAM organisation: We never access more than wishbone_data_bits at
-- a time so to save resources we make the array only that wide, and
-- use consecutive indices to make a cache "line"
--
-- ROW_SIZE is the width in bytes of the BRAM (based on WB, so 64 bits)
constant ROW_SIZE : natural := wishbone_data_bits / 8;
-- ROW_PER_LINE is the number of rows (wishbone transactions) in a line
constant ROW_PER_LINE : natural := LINE_SIZE / ROW_SIZE;
-- BRAM_ROWS is the number of rows in BRAM needed to represent the full
-- dcache
constant BRAM_ROWS : natural := NUM_LINES * ROW_PER_LINE;
-- Bit fields counts in the address
-- ROW_BITS is the number of bits to select a row
constant ROW_BITS : natural := log2(BRAM_ROWS);
-- ROW_LINEBITS is the number of bits to select a row within a line
constant ROW_LINEBITS : natural := log2(ROW_PER_LINE);
-- LINE_OFF_BITS is the number of bits for the offset in a cache line
constant LINE_OFF_BITS : natural := log2(LINE_SIZE);
-- ROW_OFF_BITS is the number of bits for the offset in a row
constant ROW_OFF_BITS : natural := log2(ROW_SIZE);
-- INDEX_BITS is the number of bits to select a cache line
constant INDEX_BITS : natural := log2(NUM_LINES);
-- SET_SIZE_BITS is the log base 2 of the set size
constant SET_SIZE_BITS : natural := LINE_OFF_BITS + INDEX_BITS;
-- TAG_BITS is the number of bits of the tag part of the address
constant TAG_BITS : natural := REAL_ADDR_BITS - SET_SIZE_BITS;
-- TAG_WIDTH is the width in bits of each way of the tag RAM
constant TAG_WIDTH : natural := TAG_BITS + 7 - ((TAG_BITS + 7) mod 8);
-- WAY_BITS is the number of bits to select a way
-- Make sure this is at least 1, to avoid 0-element vectors
constant WAY_BITS : natural := maximum(log2(NUM_WAYS), 1);
-- Example of layout for 32 lines of 64 bytes:
--
-- ..  tag    |index|  line  |
-- ..         |   row   |    |
-- ..         |     |---|    | ROW_LINEBITS  (3)
-- ..         |     |--- - --| LINE_OFF_BITS (6)
-- ..         |         |- --| ROW_OFF_BITS  (3)
-- ..         |----- ---|    | ROW_BITS      (8)
-- ..         |-----|        | INDEX_BITS    (5)
-- .. --------|              | TAG_BITS      (45)
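--
-- Worked example (editor addition): for a 56-bit real address RA and the
-- default generics, the constants above place the fields at these bit
-- positions; the ranges follow directly from the definitions and match the
-- widths shown in the diagram.
--
--   line offset : RA(5 downto 0)    LINE_OFF_BITS = 6
--   row offset  : RA(2 downto 0)    ROW_OFF_BITS  = 3
--   index       : RA(10 downto 6)   INDEX_BITS    = 5
--   row         : RA(10 downto 3)   ROW_BITS      = 8
--   tag         : RA(55 downto 11)  TAG_BITS      = 45
--
-- TAG_WIDTH rounds TAG_BITS up to a byte multiple, so each way stores its
-- 45 tag bits in a 48-bit field of the tag RAM row.
--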
subtype row_t is unsigned(ROW_BITS-1 downto 0);
subtype index_t is unsigned(INDEX_BITS-1 downto 0);
subtype way_t is unsigned(WAY_BITS-1 downto 0);
subtype row_in_line_t is unsigned(ROW_LINEBITS-1 downto 0);
-- The cache data BRAM organized as described above for each way
subtype cache_row_t is std_ulogic_vector(wishbone_data_bits-1 downto 0);
-- The cache tags LUTRAM has a row per set. Vivado is a pain and will
-- not handle a clean (commented-out below) definition of the cache tags
-- as a 3D memory. For now, work around it by flattening all the tags of a
-- set into a single wide std_logic_vector per row.
subtype cache_tag_t is std_logic_vector(TAG_BITS-1 downto 0);
-- type cache_tags_set_t is array(way_t) of cache_tag_t;
-- type cache_tags_array_t is array(0 to NUM_LINES-1) of cache_tags_set_t;
constant TAG_RAM_WIDTH : natural := TAG_WIDTH * NUM_WAYS;
subtype cache_tags_set_t is std_logic_vector(TAG_RAM_WIDTH-1 downto 0);
type cache_tags_array_t is array(0 to NUM_LINES-1) of cache_tags_set_t;
-- The cache valid bits
subtype cache_way_valids_t is std_ulogic_vector(NUM_WAYS-1 downto 0);
type cache_valids_t is array(0 to NUM_LINES-1) of cache_way_valids_t;
type row_per_line_valid_t is array(0 to ROW_PER_LINE - 1) of std_ulogic;
-- Storage. Hopefully implemented in LUTs
signal cache_tags : cache_tags_array_t;
signal cache_tag_set : cache_tags_set_t;
signal cache_valids : cache_valids_t;
attribute ram_style : string;
attribute ram_style of cache_tags : signal is "distributed";
-- L1 TLB.
constant TLB_SET_BITS : natural := log2(TLB_SET_SIZE);
constant TLB_WAY_BITS : natural := maximum(log2(TLB_NUM_WAYS), 1);
constant TLB_EA_TAG_BITS : natural := 64 - (TLB_LG_PGSZ + TLB_SET_BITS);
constant TLB_TAG_WAY_BITS : natural := TLB_NUM_WAYS * TLB_EA_TAG_BITS;
constant TLB_PTE_BITS : natural := 64;
constant TLB_PTE_WAY_BITS : natural := TLB_NUM_WAYS * TLB_PTE_BITS;
subtype tlb_way_t is integer range 0 to TLB_NUM_WAYS - 1;
subtype tlb_way_sig_t is unsigned(TLB_WAY_BITS-1 downto 0);
subtype tlb_index_t is integer range 0 to TLB_SET_SIZE - 1;
subtype tlb_index_sig_t is unsigned(TLB_SET_BITS-1 downto 0);
subtype tlb_way_valids_t is std_ulogic_vector(TLB_NUM_WAYS-1 downto 0);
type tlb_valids_t is array(tlb_index_t) of tlb_way_valids_t;
subtype tlb_tag_t is std_ulogic_vector(TLB_EA_TAG_BITS - 1 downto 0);
subtype tlb_way_tags_t is std_ulogic_vector(TLB_TAG_WAY_BITS-1 downto 0);
type tlb_tags_t is array(tlb_index_t) of tlb_way_tags_t;
subtype tlb_pte_t is std_ulogic_vector(TLB_PTE_BITS - 1 downto 0);
subtype tlb_way_ptes_t is std_ulogic_vector(TLB_PTE_WAY_BITS-1 downto 0);
type tlb_ptes_t is array(tlb_index_t) of tlb_way_ptes_t;
type hit_way_set_t is array(tlb_way_t) of way_t;
signal dtlb_valids : tlb_valids_t;
signal dtlb_tags : tlb_tags_t;
signal dtlb_ptes : tlb_ptes_t;
attribute ram_style of dtlb_tags : signal is "distributed";
attribute ram_style of dtlb_ptes : signal is "distributed";
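-- Worked numbers (editor addition) for the default TLB generics
-- (TLB_SET_SIZE = 64, TLB_NUM_WAYS = 2, TLB_LG_PGSZ = 12):
--
--   TLB_SET_BITS    = 6    (EA bits 17 downto 12 select the TLB set)
--   TLB_EA_TAG_BITS = 64 - (12 + 6) = 46
--   total entries   = 64 sets * 2 ways = 128
--   reach           = 128 entries * 4 kB pages = 512 kB mapped at once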
-- Record for storing permission, attribute, etc. bits from a PTE
type perm_attr_t is record
reference : std_ulogic;
changed : std_ulogic;
nocache : std_ulogic;
priv : std_ulogic;
rd_perm : std_ulogic;
wr_perm : std_ulogic;
end record;
function extract_perm_attr(pte : std_ulogic_vector(TLB_PTE_BITS - 1 downto 0)) return perm_attr_t is
variable pa : perm_attr_t;
begin
pa.reference := pte(8);
pa.changed := pte(7);
pa.nocache := pte(5);
pa.priv := pte(3);
pa.rd_perm := pte(2);
pa.wr_perm := pte(1);
return pa;
end;
constant real_mode_perm_attr : perm_attr_t := (nocache => '0', others => '1');
-- Type of operation on a "valid" input
type op_t is (OP_NONE,
OP_BAD, -- NC cache hit, TLB miss, prot/RC failure
OP_STCX_FAIL, -- conditional store w/o reservation
OP_LOAD_HIT, -- Cache hit on load
OP_LOAD_MISS, -- Load missing cache
OP_LOAD_NC, -- Non-cacheable load
OP_STORE_HIT, -- Store hitting cache
OP_STORE_MISS); -- Store missing cache
-- Cache state machine
type state_t is (IDLE, -- Normal load hit processing
RELOAD_WAIT_ACK, -- Cache reload wait ack
STORE_WAIT_ACK, -- Store wait ack
NC_LOAD_WAIT_ACK); -- Non-cacheable load wait ack
--
-- Dcache operations:
--
-- In order to make timing, we use the BRAMs with an output buffer,
-- which means that the BRAM output is delayed by an extra cycle.
--
-- Thus, the dcache has a 2-stage internal pipeline for cache hits
-- with no stalls. Stores also complete in 2 cycles in most
-- circumstances.
--
-- A request proceeds through the pipeline as follows.
--
-- Cycle 0: Request is received from loadstore or mmu if either
-- d_in.valid or m_in.valid is 1 (not both). In this cycle portions
-- of the address are presented to the TLB tag RAM and data RAM
-- and the cache tag RAM and data RAM.
--
-- Clock edge between cycle 0 and cycle 1:
-- Request is stored in r0 (assuming r0_full was 0). TLB tag and
-- data RAMs are read, and the cache tag RAM is read. (Cache data
-- comes out a cycle later due to its output register, giving the
-- whole of cycle 1 to read the cache data RAM.)
--
-- Cycle 1: TLB and cache tag matching is done, the real address
-- (RA) for the access is calculated, and the type of operation is
-- determined (the OP_* values above). This gives the TLB way for
-- a TLB hit, and the cache way for a hit or the way to replace
-- for a load miss.
--
-- Clock edge between cycle 1 and cycle 2:
-- Request is stored in r1 (assuming r1.full was 0)
-- The state machine transitions out of IDLE state for a load miss,
-- a store, a dcbz, or a non-cacheable load. r1.full is set to 1
-- for a load miss, dcbz or non-cacheable load but not a store.
--
-- Cycle 2: Completion signals are asserted for a load hit,
-- a store (excluding dcbz), a TLB operation, a conditional
-- store which failed due to no matching reservation, or an error
-- (cache hit on non-cacheable operation, TLB miss, or protection
-- fault).
--
-- For a load miss, store, or dcbz, the state machine initiates
-- a wishbone cycle, which takes at least 2 cycles. For a store,
-- if another store comes in with the same cache tag (therefore
-- in the same 4k page), it can be added on to the existing cycle,
-- subject to some constraints.
-- While r1.full = 1, no new requests can go from r0 to r1, but
-- requests can come in to r0 and be satisfied if they are
-- cacheable load hits or stores with the same cache tag.
--
-- Writing to the cache data RAM is done at the clock edge
-- at the end of cycle 2 for a store hit (excluding dcbz).
-- Stores that miss are not written to the cache data RAM
-- but just stored through to memory.
-- Dcbz is done like a cache miss, but the wishbone cycle
-- is a write rather than a read, and zeroes are written to
-- the cache data RAM. Thus dcbz will allocate the line in
-- the cache as well as zeroing memory.
--
-- Since stores are written to the cache data RAM at the end of
-- cycle 2, and loads can come in and hit on the data just stored,
-- there is a two-stage bypass from store data to load data to
-- make sure that loads always see previously-stored data even
-- if it has not yet made it to the cache data RAM.
--
-- Load misses read the requested dword of the cache line first in
-- the memory read request and then cycle around through the other
-- dwords. The load is completed on the cycle after the requested
-- dword comes back from memory (using a forwarding path, rather
-- than going via the cache data RAM). We maintain an array of
-- valid bits per dword for the line being refilled so that
-- subsequent load requests to the same line can be completed as
-- soon as the necessary data comes in from memory, without
-- waiting for the whole line to be read.
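--
-- Editor's condensed timing sketch of a cacheable load hit, restating the
-- description above (no new behaviour; names refer to signals defined
-- further down in this file):
--
--   cycle 0  : d_in.valid = 1; address bits are presented to the TLB
--              tag/data RAMs and the cache tag/data RAMs
--   edge 0/1 : request latched into r0 (r0_full was 0); TLB and cache tag
--              RAMs are read
--   cycle 1  : TLB and cache tag comparison, RA formed, the operation
--              resolves to OP_LOAD_HIT and the hit way is determined
--   edge 1/2 : hit state latched into r1 (hit_load_valid, hit_way, hit_index)
--   cycle 2  : cache data RAM output (one cycle late due to its output
--              register) is selected by the hit way and completion is
--              signalled back to loadstore1
--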
-- Stage 0 register, basically contains just the latched request
type reg_stage_0_t is record
req : Loadstore1ToDcacheType;
tlbie : std_ulogic; -- indicates a tlbie request (from MMU)
doall : std_ulogic; -- with tlbie, indicates flush whole TLB
tlbld : std_ulogic; -- indicates a TLB load request (from MMU)
mmu_req : std_ulogic; -- indicates source of request
d_valid : std_ulogic; -- indicates req.data is valid now
end record;
signal r0 : reg_stage_0_t;
signal r0_full : std_ulogic;
type mem_access_request_t is record
op : op_t;
valid : std_ulogic;
dcbz : std_ulogic;
real_addr : real_addr_t;
data : std_ulogic_vector(63 downto 0);
byte_sel : std_ulogic_vector(7 downto 0);
hit_way : way_t;
same_tag : std_ulogic;
mmu_req : std_ulogic;
end record;
-- First stage register, contains state for stage 1 of load hits
-- and for the state machine used by all other operations
--
type reg_stage_1_t is record
-- Info about the request
full : std_ulogic; -- have uncompleted request
mmu_req : std_ulogic; -- request is from MMU
req : mem_access_request_t;
-- Cache hit state
hit_way : way_t;
hit_load_valid : std_ulogic;
hit_index : index_t;
cache_hit : std_ulogic;
-- TLB hit state
tlb_hit : std_ulogic;
tlb_hit_way : tlb_way_sig_t;
tlb_hit_index : tlb_index_sig_t;
tlb_victim : tlb_way_sig_t;
-- data buffer for data forwarded from writes to reads
forward_data : std_ulogic_vector(63 downto 0);
forward_tag : cache_tag_t;
forward_sel : std_ulogic_vector(7 downto 0);
forward_valid : std_ulogic;
forward_row : row_t;
data_out : std_ulogic_vector(63 downto 0);
-- Cache miss state (reload state machine)
state : state_t;
dcbz : std_ulogic;
write_bram : std_ulogic;
write_tag : std_ulogic;
slow_valid : std_ulogic;
wb : wishbone_master_out;
reload_tag : cache_tag_t;
store_way : way_t;
store_row : row_t;
store_index : index_t;
end_row_ix : row_in_line_t;
rows_valid : row_per_line_valid_t;
acks_pending : unsigned(2 downto 0);
stalled : std_ulogic;
dec_acks : std_ulogic;
choose_victim : std_ulogic;
victim_way : way_t;
-- Signals to complete (possibly with error)
ls_valid : std_ulogic;
ls_error : std_ulogic;
mmu_done : std_ulogic;
mmu_error : std_ulogic;
cache_paradox : std_ulogic;
-- Signal to complete a failed stcx.
stcx_fail : std_ulogic;
end record;
signal r1 : reg_stage_1_t;
signal ev : DcacheEventType;
-- Reservation information
--
type reservation_t is record
valid : std_ulogic;
addr : std_ulogic_vector(63 downto LINE_OFF_BITS);
end record;
signal reservation : reservation_t;
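-- Note (editor addition): only bits 63 downto LINE_OFF_BITS of the address
-- are kept, so the reservation granule is one cache line, i.e.
-- 2**LINE_OFF_BITS = 64 bytes with the default LINE_SIZE.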
-- Async signals on incoming request
signal req_index : index_t;
signal req_hit_way : way_t;
signal req_tag : cache_tag_t;
signal req_op : op_t;
signal req_data : std_ulogic_vector(63 downto 0);
signal req_same_tag : std_ulogic;
signal req_go : std_ulogic;
signal early_req_row : row_t;
signal early_rd_valid : std_ulogic;
signal cancel_store : std_ulogic;
signal set_rsrv : std_ulogic;
signal clear_rsrv : std_ulogic;
signal r0_valid : std_ulogic;
signal r0_stall : std_ulogic;
signal fwd_same_tag : std_ulogic;
signal use_forward_st : std_ulogic;
signal use_forward_rl : std_ulogic;
signal use_forward2 : std_ulogic;
-- Cache RAM interface
type cache_ram_out_t is array(0 to NUM_WAYS-1) of cache_row_t;
signal cache_out : cache_ram_out_t;
signal ram_wr_data : cache_row_t;
signal ram_wr_select : std_ulogic_vector(ROW_SIZE - 1 downto 0);
-- PLRU output interface
signal plru_victim : way_t;
signal replace_way : way_t;
-- Wishbone read/write/cache write formatting signals
signal bus_sel : std_ulogic_vector(7 downto 0);
-- TLB signals
signal tlb_tag_way : tlb_way_tags_t;
signal tlb_pte_way : tlb_way_ptes_t;
signal tlb_valid_way : tlb_way_valids_t;
signal tlb_req_index : tlb_index_sig_t;
signal tlb_read_valid : std_ulogic;
signal tlb_hit : std_ulogic;
signal tlb_hit_way : tlb_way_sig_t;
signal pte : tlb_pte_t;
signal ra : real_addr_t;
signal valid_ra : std_ulogic;
signal perm_attr : perm_attr_t;
signal rc_ok : std_ulogic;
signal perm_ok : std_ulogic;
signal access_ok : std_ulogic;
signal tlb_miss : std_ulogic;
-- TLB PLRU output interface
signal tlb_plru_victim : std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
signal snoop_tag_set : cache_tags_set_t;
signal snoop_valid : std_ulogic;
signal snoop_wrtag : cache_tag_t;
signal snoop_index : index_t;
--
-- Helper functions to decode incoming requests
--
-- Return the cache line index (tag index) for an address
function get_index(addr: std_ulogic_vector) return index_t is
begin
return unsigned(addr(SET_SIZE_BITS - 1 downto LINE_OFF_BITS));
end;
-- Return the cache row index (data memory) for an address
function get_row(addr: std_ulogic_vector) return row_t is
begin
return unsigned(addr(SET_SIZE_BITS - 1 downto ROW_OFF_BITS));
end;
-- Return the index of a row within a line
function get_row_of_line(row: row_t) return row_in_line_t is
begin
return row(ROW_LINEBITS-1 downto 0);
end;
-- Returns whether a wishbone address is in the last row of a line
function is_last_row_wb_addr(addr: wishbone_addr_type; last: row_in_line_t) return boolean is
begin
return unsigned(addr(LINE_OFF_BITS - ROW_OFF_BITS - 1 downto 0)) = last;
end;
-- Returns whether this is the last row of a line
function is_last_row(row: row_t; last: row_in_line_t) return boolean is
begin
return get_row_of_line(row) = last;
end;
-- Return the address of the next row in the current cache line
function next_row_wb_addr(addr: wishbone_addr_type) return std_ulogic_vector is
variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
variable result : wishbone_addr_type;
begin
-- Is there no simpler way in VHDL to generate this 3-bit adder?
row_idx := addr(ROW_LINEBITS - 1 downto 0);
row_idx := std_ulogic_vector(unsigned(row_idx) + 1);
result := addr;
result(ROW_LINEBITS - 1 downto 0) := row_idx;
return result;
end;
-- Return the next row in the current cache line. We use a dedicated
-- function in order to limit the size of the generated adder to be
-- only the bits within a cache line (3 bits with default settings)
--
function next_row(row: row_t) return row_t is
variable row_v : std_ulogic_vector(ROW_BITS-1 downto 0);
variable row_idx : std_ulogic_vector(ROW_LINEBITS-1 downto 0);
variable result : std_ulogic_vector(ROW_BITS-1 downto 0);
begin
row_v := std_ulogic_vector(row);
row_idx := row_v(ROW_LINEBITS-1 downto 0);
row_v(ROW_LINEBITS-1 downto 0) := std_ulogic_vector(unsigned(row_idx) + 1);
return unsigned(row_v);
end;
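-- Worked example (editor addition): with ROW_BITS = 8 and ROW_LINEBITS = 3
-- only the low 3 bits are incremented, so a row wraps within its line and
-- the carry never reaches the index bits. For row x"2F" (line index 5,
-- row-in-line 7):
--   is_last_row(x"2F", to_unsigned(7, ROW_LINEBITS)) = true
--   next_row(x"2F")                                  = x"28"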
-- Get the tag value from the address
function get_tag(addr: std_ulogic_vector) return cache_tag_t is
begin
return addr(REAL_ADDR_BITS - 1 downto SET_SIZE_BITS);
end;
-- Read a tag from a tag memory row
function read_tag(way: integer; tagset: cache_tags_set_t) return cache_tag_t is
begin
return tagset(way * TAG_WIDTH + TAG_BITS - 1 downto way * TAG_WIDTH);
end;
-- Read a TLB tag from a TLB tag memory row
function read_tlb_tag(way: tlb_way_t; tags: tlb_way_tags_t) return tlb_tag_t is
variable j : integer;
begin
j := way * TLB_EA_TAG_BITS;
return tags(j + TLB_EA_TAG_BITS - 1 downto j);
end;
-- Write a TLB tag to a TLB tag memory row
procedure write_tlb_tag(way: tlb_way_t; tags: inout tlb_way_tags_t;
tag: tlb_tag_t) is
variable j : integer;
begin
j := way * TLB_EA_TAG_BITS;
tags(j + TLB_EA_TAG_BITS - 1 downto j) := tag;
end;
-- Read a PTE from a TLB PTE memory row
function read_tlb_pte(way: tlb_way_t; ptes: tlb_way_ptes_t) return tlb_pte_t is
variable j : integer;
begin
j := way * TLB_PTE_BITS;
return ptes(j + TLB_PTE_BITS - 1 downto j);
end;
procedure write_tlb_pte(way: tlb_way_t; ptes: inout tlb_way_ptes_t; newpte: tlb_pte_t) is
variable j : integer;
begin
j := way * TLB_PTE_BITS;
ptes(j + TLB_PTE_BITS - 1 downto j) := newpte;
end;
begin
assert LINE_SIZE mod ROW_SIZE = 0 report "LINE_SIZE not multiple of ROW_SIZE" severity FAILURE;
assert ispow2(LINE_SIZE) report "LINE_SIZE not power of 2" severity FAILURE;
assert ispow2(NUM_LINES) report "NUM_LINES not power of 2" severity FAILURE;
assert ispow2(ROW_PER_LINE) and ROW_PER_LINE > 1
report "ROW_PER_LINE not power of 2 greater than 1" severity FAILURE;
assert (ROW_BITS = INDEX_BITS + ROW_LINEBITS)
report "geometry bits don't add up" severity FAILURE;
assert (LINE_OFF_BITS = ROW_OFF_BITS + ROW_LINEBITS)
report "geometry bits don't add up" severity FAILURE;
assert (REAL_ADDR_BITS = TAG_BITS + INDEX_BITS + LINE_OFF_BITS)
report "geometry bits don't add up" severity FAILURE;
assert (REAL_ADDR_BITS = TAG_BITS + ROW_BITS + ROW_OFF_BITS)
report "geometry bits don't add up" severity FAILURE;
assert (64 = wishbone_data_bits)
report "Can't yet handle a wishbone width that isn't 64-bits" severity FAILURE;
assert SET_SIZE_BITS <= TLB_LG_PGSZ report "Set indexed by virtual address" severity FAILURE;
-- Latch the request in r0.req as long as we're not stalling
stage_0 : process(clk)
variable r : reg_stage_0_t;
begin
if rising_edge(clk) then
assert (d_in.valid and m_in.valid) = '0' report
"request collision loadstore vs MMU";
if m_in.valid = '1' then
r.req.valid := '1';
r.req.load := not (m_in.tlbie or m_in.tlbld);
r.req.dcbz := '0';
r.req.nc := '0';
r.req.reserve := '0';
r.req.virt_mode := '0';
r.req.priv_mode := '1';
r.req.addr := m_in.addr;
r.req.data := m_in.pte;
r.req.byte_sel := (others => '1');
r.tlbie := m_in.tlbie;
r.doall := m_in.doall;
r.tlbld := m_in.tlbld;
r.mmu_req := '1';
r.d_valid := '1';
else
r.req := d_in;
r.req.data := (others => '0');
r.tlbie := '0';
r.doall := '0';
r.tlbld := '0';
r.mmu_req := '0';
r.d_valid := '0';
end if;
if r.req.valid = '1' and r.doall = '0' then
assert not is_X(r.req.addr) severity failure;
end if;
if rst = '1' then
r0_full <= '0';
elsif r1.full = '0' and d_in.hold = '0' then
r0 <= r;
r0_full <= r.req.valid;
elsif r0.d_valid = '0' then
-- Sample data the cycle after a request comes in from loadstore1.
-- If this request is already moving into r1 then the data will get
-- put directly into req.data in the dcache_slow process below.
r0.req.data <= d_in.data;
r0.d_valid <= r0.req.valid;
end if;
end if;
end process;
-- we don't yet handle collisions between loadstore1 requests and MMU requests
m_out.stall <= '0';
-- Hold off the request in r0 when r1 has an uncompleted request
r0_stall <= r1.full or d_in.hold;
r0_valid <= r0_full and not r1.full and not d_in.hold;
stall_out <= r1.full;
events <= ev;
-- TLB
-- Operates in the second cycle on the request latched in r0.req.
-- TLB updates write the entry at the end of the second cycle.
tlb_read : process(clk)
variable index : tlb_index_t;
variable addrbits : std_ulogic_vector(TLB_SET_BITS - 1 downto 0);
variable valid : std_ulogic;
begin
if rising_edge(clk) then
if m_in.valid = '1' then
addrbits := m_in.addr(TLB_LG_PGSZ + TLB_SET_BITS - 1 downto TLB_LG_PGSZ);
valid := not (m_in.tlbie and m_in.doall);
else
addrbits := d_in.addr(TLB_LG_PGSZ + TLB_SET_BITS - 1 downto TLB_LG_PGSZ);
valid := d_in.valid;
end if;
-- If the previous op isn't finished,
-- then keep the same output for next cycle.
if r0_stall = '0' then
assert not (valid = '1' and is_X(addrbits));
if is_X(addrbits) then
tlb_valid_way <= (others => 'X');
tlb_tag_way <= (others => 'X');
tlb_pte_way <= (others => 'X');
else
index := to_integer(unsigned(addrbits));
tlb_valid_way <= dtlb_valids(index);
tlb_tag_way <= dtlb_tags(index);
tlb_pte_way <= dtlb_ptes(index);
end if;
end if;
if rst = '1' then
tlb_read_valid <= '0';
elsif r0_stall = '0' then
tlb_read_valid <= valid;
end if;
end if;
end process;
-- Generate TLB PLRUs
maybe_tlb_plrus : if TLB_NUM_WAYS > 1 generate
type tlb_plru_array is array(tlb_index_t) of std_ulogic_vector(TLB_NUM_WAYS - 2 downto 0);
signal tlb_plru_ram : tlb_plru_array;
signal tlb_plru_cur : std_ulogic_vector(TLB_NUM_WAYS - 2 downto 0);
signal tlb_plru_upd : std_ulogic_vector(TLB_NUM_WAYS - 2 downto 0);
signal tlb_plru_acc : std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
signal tlb_plru_out : std_ulogic_vector(TLB_WAY_BITS-1 downto 0);
begin
tlb_plru : entity work.plrufn
generic map (
BITS => TLB_WAY_BITS
)
port map (
acc => tlb_plru_acc,
tree_in => tlb_plru_cur,
tree_out => tlb_plru_upd,
lru => tlb_plru_out
);
process(all)
begin
-- Read PLRU bits from array
if is_X(r1.tlb_hit_index) then
tlb_plru_cur <= (others => 'X');
else
tlb_plru_cur <= tlb_plru_ram(to_integer(r1.tlb_hit_index));
end if;
-- PLRU interface
tlb_plru_acc <= std_ulogic_vector(r1.tlb_hit_way);
tlb_plru_victim <= tlb_plru_out;
end process;
-- synchronous writes to TLB PLRU array
process(clk)
begin
if rising_edge(clk) then
if r1.tlb_hit = '1' then
assert not is_X(r1.tlb_hit_index) severity failure;
tlb_plru_ram(to_integer(r1.tlb_hit_index)) <= tlb_plru_upd;
end if;
end if;
end process;
end generate;
tlb_search : process(all)
variable hitway : tlb_way_sig_t;
variable hit : std_ulogic;
variable eatag : tlb_tag_t;
begin
tlb_req_index <= unsigned(r0.req.addr(TLB_LG_PGSZ + TLB_SET_BITS - 1
downto TLB_LG_PGSZ));
hitway := to_unsigned(0, TLB_WAY_BITS);
hit := '0';
eatag := r0.req.addr(63 downto TLB_LG_PGSZ + TLB_SET_BITS);
for i in tlb_way_t loop
if tlb_read_valid = '1' and tlb_valid_way(i) = '1' and
read_tlb_tag(i, tlb_tag_way) = eatag then
hitway := to_unsigned(i, TLB_WAY_BITS);
hit := '1';
end if;
end loop;
tlb_hit <= hit and r0_valid;
tlb_hit_way <= hitway;
if tlb_hit = '1' then
pte <= read_tlb_pte(to_integer(hitway), tlb_pte_way);
else
pte <= (others => '0');
end if;
valid_ra <= tlb_hit or not r0.req.virt_mode;
tlb_miss <= r0_valid and r0.req.virt_mode and not tlb_hit;
if r0.req.virt_mode = '1' then
ra <= pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
r0.req.addr(TLB_LG_PGSZ - 1 downto ROW_OFF_BITS) &
(ROW_OFF_BITS-1 downto 0 => '0');
perm_attr <= extract_perm_attr(pte);
else
ra <= r0.req.addr(REAL_ADDR_BITS - 1 downto ROW_OFF_BITS) &
(ROW_OFF_BITS-1 downto 0 => '0');
perm_attr <= real_mode_perm_attr;
end if;
end process;
tlb_update : process(clk)
variable tlbie : std_ulogic;
variable tlbwe : std_ulogic;
variable repl_way : tlb_way_sig_t;
variable eatag : tlb_tag_t;
variable tagset : tlb_way_tags_t;
variable pteset : tlb_way_ptes_t;
begin
if rising_edge(clk) then
tlbie := r0_valid and r0.tlbie;
tlbwe := r0_valid and r0.tlbld;
ev.dtlb_miss_resolved <= tlbwe;
if rst = '1' or (tlbie = '1' and r0.doall = '1') then
-- clear all valid bits at once
for i in tlb_index_t loop
dtlb_valids(i) <= (others => '0');
end loop;
elsif tlbie = '1' then
if tlb_hit = '1' then
assert not is_X(tlb_req_index);
assert not is_X(tlb_hit_way);
dtlb_valids(to_integer(tlb_req_index))(to_integer(tlb_hit_way)) <= '0';
end if;
elsif tlbwe = '1' then
assert not is_X(tlb_req_index);
repl_way := to_unsigned(0, TLB_WAY_BITS);
if TLB_NUM_WAYS > 1 then
if tlb_hit = '1' then
repl_way := tlb_hit_way;
else
repl_way := unsigned(r1.tlb_victim);
end if;
assert not is_X(repl_way);
end if;
eatag := r0.req.addr(63 downto TLB_LG_PGSZ + TLB_SET_BITS);
tagset := tlb_tag_way;
write_tlb_tag(to_integer(repl_way), tagset, eatag);
dtlb_tags(to_integer(tlb_req_index)) <= tagset;
pteset := tlb_pte_way;
write_tlb_pte(to_integer(repl_way), pteset, r0.req.data);
dtlb_ptes(to_integer(tlb_req_index)) <= pteset;
dtlb_valids(to_integer(tlb_req_index))(to_integer(repl_way)) <= '1';
end if;
end if;
end process;
-- Generate PLRUs
maybe_plrus : if NUM_WAYS > 1 generate
type plru_array is array(0 to NUM_LINES-1) of std_ulogic_vector(NUM_WAYS - 2 downto 0);
signal plru_ram : plru_array;
signal plru_cur : std_ulogic_vector(NUM_WAYS - 2 downto 0);
signal plru_upd : std_ulogic_vector(NUM_WAYS - 2 downto 0);
signal plru_acc : std_ulogic_vector(WAY_BITS-1 downto 0);
signal plru_out : std_ulogic_vector(WAY_BITS-1 downto 0);
begin
plru : entity work.plrufn
generic map (
BITS => WAY_BITS
)
port map (
acc => plru_acc,
tree_in => plru_cur,
tree_out => plru_upd,
lru => plru_out
);
process(all)
begin
-- Read PLRU bits from array
if is_X(r1.hit_index) then
plru_cur <= (others => 'X');
else
plru_cur <= plru_ram(to_integer(r1.hit_index));
end if;
-- PLRU interface
plru_acc <= std_ulogic_vector(r1.hit_way);
plru_victim <= unsigned(plru_out);
end process;
-- synchronous writes to PLRU array
process(clk)
begin
if rising_edge(clk) then
-- We update the PLRU when hitting the cache or when replacing
-- an entry. The PLRU update will be "visible" on the next cycle
-- so the victim selection will correctly see the *old* value.
if r1.cache_hit = '1' or r1.choose_victim = '1' then
report "PLRU update, index=" & to_hstring(r1.hit_index) &
" way=" & to_hstring(r1.hit_way);
assert not is_X(r1.hit_index) severity failure;
plru_ram(to_integer(r1.hit_index)) <= plru_upd;
end if;
end if;
end process;
end generate;
-- Cache tag RAM read port
cache_tag_read : process(clk)
variable index : index_t;
variable valid : std_ulogic;
begin
if rising_edge(clk) then
if r0_stall = '1' then
index := req_index;
valid := r0.req.valid and not (r0.tlbie or r0.tlbld);
elsif m_in.valid = '1' then
index := get_index(m_in.addr);
valid := not (m_in.tlbie or m_in.tlbld);
else
index := get_index(d_in.addr);
valid := d_in.valid;
end if;
if valid = '1' then
cache_tag_set <= cache_tags(to_integer(index));
else
cache_tag_set <= (others => '0');
end if;
end if;
end process;
-- Cache tag RAM second read port, for snooping
cache_tag_read_2 : process(clk)
variable addr : real_addr_t;
begin
if rising_edge(clk) then
-- Don't snoop our own cycles
snoop_valid <= '0';
if not (r1.wb.cyc = '1' and wishbone_in.stall = '0') then
if (snoop_in.cyc and snoop_in.stb and snoop_in.we) = '1' then
snoop_valid <= '1';
addr := addr_to_real(wb_to_addr(snoop_in.adr));
assert not is_X(addr);
snoop_tag_set <= cache_tags(to_integer(get_index(addr)));
snoop_wrtag <= get_tag(addr);
snoop_index <= get_index(addr);
end if;
end if;
end if;
end process;
-- Cache request parsing and hit detection
dcache_request : process(all)
variable req_row : row_t;
variable rindex : index_t;
variable is_hit : std_ulogic;
variable hit_way : way_t;
variable op : op_t;
variable opsel : std_ulogic_vector(2 downto 0);
variable go : std_ulogic;
variable nc : std_ulogic;
variable s_hit : std_ulogic;
variable s_tag : cache_tag_t;
variable s_pte : tlb_pte_t;
variable s_ra : real_addr_t;
variable hit_set : std_ulogic_vector(TLB_NUM_WAYS - 1 downto 0);
variable hit_way_set : hit_way_set_t;
variable rel_matches : std_ulogic_vector(TLB_NUM_WAYS - 1 downto 0);
variable rel_match : std_ulogic;
variable fwd_matches : std_ulogic_vector(TLB_NUM_WAYS - 1 downto 0);
variable fwd_match : std_ulogic;
begin
-- Extract line, row and tag from request
rindex := get_index(r0.req.addr);
req_index <= rindex;
req_row := get_row(r0.req.addr);
req_tag <= get_tag(ra);
go := r0_valid and not (r0.tlbie or r0.tlbld) and not r1.ls_error;
if is_X(r0.req.addr) then
go := '0';
end if;
if go = '1' then
assert not is_X(r1.forward_tag);
end if;
-- Test if pending request is a hit on any way
-- In order to make timing in virtual mode, when we are using the TLB,
-- we compare each cache way's tag against the real address formed from
-- each TLB way, and then decide later which match to use.
hit_way := to_unsigned(0, WAY_BITS);
is_hit := '0';
rel_match := '0';
fwd_match := '0';
if r0.req.virt_mode = '1' then
rel_matches := (others => '0');
fwd_matches := (others => '0');
for j in tlb_way_t loop
hit_way_set(j) := to_unsigned(0, WAY_BITS);
s_hit := '0';
s_pte := read_tlb_pte(j, tlb_pte_way);
s_ra := s_pte(REAL_ADDR_BITS - 1 downto TLB_LG_PGSZ) &
r0.req.addr(TLB_LG_PGSZ - 1 downto 0);
s_tag := get_tag(s_ra);
if go = '1' then
assert not is_X(s_tag);
end if;
for i in 0 to NUM_WAYS-1 loop
if go = '1' and cache_valids(to_integer(rindex))(i) = '1' and
read_tag(i, cache_tag_set) = s_tag and
tlb_valid_way(j) = '1' then
hit_way_set(j) := to_unsigned(i, WAY_BITS);
s_hit := '1';
end if;
end loop;
hit_set(j) := s_hit;
if go = '1' and not is_X(r1.reload_tag) and s_tag = r1.reload_tag then
rel_matches(j) := '1';
end if;
if go = '1' and s_tag = r1.forward_tag then
fwd_matches(j) := '1';
end if;
end loop;
if tlb_hit = '1' and go = '1' then
assert not is_X(tlb_hit_way);
is_hit := hit_set(to_integer(tlb_hit_way));
hit_way := hit_way_set(to_integer(tlb_hit_way));
rel_match := rel_matches(to_integer(tlb_hit_way));
fwd_match := fwd_matches(to_integer(tlb_hit_way));
end if;
else
s_tag := get_tag(r0.req.addr);
if go = '1' then
assert not is_X(s_tag);
end if;
for i in 0 to NUM_WAYS-1 loop
if go = '1' and cache_valids(to_integer(rindex))(i) = '1' and
read_tag(i, cache_tag_set) = s_tag then
hit_way := to_unsigned(i, WAY_BITS);
is_hit := '1';
end if;
end loop;
if go = '1' and not is_X(r1.reload_tag) and s_tag = r1.reload_tag then
rel_match := '1';
end if;
if go = '1' and s_tag = r1.forward_tag then
fwd_match := '1';
end if;
end if;
req_same_tag <= rel_match;
fwd_same_tag <= fwd_match;
-- Whether to use forwarded data for a load or not
use_forward_st <= '0';
use_forward_rl <= '0';
if rel_match = '1' then
assert not is_X(r1.store_row);
assert not is_X(req_row);
end if;
if rel_match = '1' and r1.store_row = req_row then
-- Use the forwarding path if this cycle is a write to this row
use_forward_st <= r1.write_bram;
if r1.state = RELOAD_WAIT_ACK and wishbone_in.ack = '1' then
use_forward_rl <= '1';
end if;
end if;
use_forward2 <= '0';
if fwd_match = '1' then