package xiangshan.mem

import chisel3._
import chisel3.util._
import xiangshan._
import utils._
import xiangshan.cache._
import utils.ParallelAND
import utils.TrueLRU

class SbufferUserBundle extends XSBundle {
  val pc = UInt(VAddrBits.W) // for debug
}

trait HasSBufferConst extends HasXSParameter {
  val sBufferIndexWidth: Int = log2Up(StoreBufferSize) // width of the cache line index

  // paddr = tag + offset
  val tagWidth: Int = PAddrBits - log2Up(CacheLineSize / 8)
  val offsetWidth: Int = log2Up(CacheLineSize / 8)

  val cacheMaskWidth: Int = CacheLineSize / 8
  val instMaskWidth: Int = XLEN / 8
}
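
// Worked example of the constants above (a sketch only; it assumes PAddrBits = 40,
// CacheLineSize = 512 bits and XLEN = 64, while the real values come from HasXSParameter):
//   offsetWidth    = log2Up(512 / 8) = 6    // byte offset within a 64-byte line
//   tagWidth       = 40 - 6 = 34            // remaining physical address bits
//   cacheMaskWidth = 64                     // one mask/data byte per line byte
//   instMaskWidth  = 8                      // one XLEN-wide store covers 8 bytes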

class SBufferCacheLine extends XSBundle with HasSBufferConst {
  val valid = Bool()
  val tag = UInt(tagWidth.W)
  val data = Vec(cacheMaskWidth, UInt(8.W)) // UInt(CacheLineSize.W)
  val mask = Vec(cacheMaskWidth, Bool())
}

class UpdateInfo extends XSBundle with HasSBufferConst {
  val idx: UInt = UInt(sBufferIndexWidth.W) // cache index affected by this store req
  val newTag: UInt = UInt(tagWidth.W)
  val newMask: Vec[Bool] = Vec(cacheMaskWidth, Bool())
  val newData: Vec[UInt] = Vec(cacheMaskWidth, UInt(8.W))

  val isForward: Bool = Bool() // this req has the same tag as some former req
  val isUpdated: Bool = Bool()
  val isInserted: Bool = Bool()
  val isIgnored: Bool = Bool()
}

class SbufferFlushBundle extends Bundle {
  val valid = Output(Bool())
  val empty = Input(Bool())
}

// Store buffer for XiangShan Out of Order LSU
class Sbuffer extends XSModule with HasSBufferConst {
  val io = IO(new Bundle() {
    val in = Vec(StorePipelineWidth, Flipped(Decoupled(new DCacheWordReq)))
    val dcache = new DCacheLineIO
    val forward = Vec(LoadPipelineWidth, Flipped(new LoadForwardQueryIO))
    val flush = new Bundle {
      val valid = Input(Bool())
      val empty = Output(Bool())
    } // sbuffer flush
  })

  val cache: Vec[SBufferCacheLine] = RegInit(VecInit(Seq.fill(StoreBufferSize)(0.U.asTypeOf(new SBufferCacheLine))))
  val updateInfo = WireInit(VecInit(Seq.fill(StorePipelineWidth)(0.U.asTypeOf(new UpdateInfo))))
  updateInfo := DontCare

  val lru = new TrueLRU(StoreBufferSize)

  def getTag(pa: UInt): UInt =
    pa(PAddrBits - 1, PAddrBits - tagWidth)

  def getAddr(tag: UInt): UInt =
    Cat(tag, 0.U((PAddrBits - tagWidth).W))

  def getByteOffset(pa: UInt): UInt =
    Cat(pa(offsetWidth - 1, log2Up(8)), Fill(3, 0.U))

  // check whether cacheIdx is modified by a former request in this cycle
  def busy(cacheIdx: UInt, max: Int): Bool = {
    if (max == 0)
      false.B
    else
      ParallelOR((0 until max).map(i => updateInfo(i).idx === cacheIdx && io.in(i).valid)).asBool()
  }
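
  // Note: getByteOffset keeps the in-line byte offset of pa but clears the low 3 bits,
  // i.e. it points at the 8-byte (XLEN-wide) word the store touches. For example, assuming
  // offsetWidth = 6, a paddr whose low 6 bits are 0b101101 yields offset 0b101000 = 40,
  // so bytes 40..47 of the line are the candidates for this store.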

  val lru_accessed = WireInit(VecInit(Seq.fill(StorePipelineWidth)(false.B)))

  // Get retired store from lsq
  //--------------------------------------------------------------------------------------------------------------------
  for (storeIdx <- 0 until StorePipelineWidth) {
    io.in(storeIdx).ready := false.B // asserted below when there is an empty line or the target address already hits in this buffer;
    // otherwise it stays false
    // when the d-cache write port is valid, write back the oldest line to d-cache

    updateInfo(storeIdx).isForward := false.B
    updateInfo(storeIdx).isUpdated := false.B
    updateInfo(storeIdx).isInserted := false.B
    updateInfo(storeIdx).isIgnored := false.B

    // 0. compare with former requests
    for (formerIdx <- 0 until storeIdx) {
      // formerIdx: a former request issued in the same cycle
      when ((getTag(io.in(storeIdx).bits.addr) === updateInfo(formerIdx).newTag) &&
        (updateInfo(formerIdx).isUpdated || updateInfo(formerIdx).isInserted) && io.in(storeIdx).valid && io.in(formerIdx).valid) {
        updateInfo(storeIdx).isForward := true.B
        updateInfo(formerIdx).isIgnored := true.B
        updateInfo(storeIdx).idx := updateInfo(formerIdx).idx
        XSDebug("req#%d writes same line with req#%d\n", storeIdx.U, formerIdx.U)

        updateInfo(storeIdx).isInserted := updateInfo(formerIdx).isInserted
        updateInfo(storeIdx).isUpdated := updateInfo(formerIdx).isUpdated

        updateInfo(storeIdx).newTag := updateInfo(formerIdx).newTag
        // update mask and data
        (0 until cacheMaskWidth).foreach(i => {
          when (i.U < getByteOffset(io.in(storeIdx).bits.addr).asUInt() ||
            i.U > (getByteOffset(io.in(storeIdx).bits.addr) | 7.U)) {
            updateInfo(storeIdx).newMask(i) := updateInfo(formerIdx).newMask(i)
            updateInfo(storeIdx).newData(i) := updateInfo(formerIdx).newData(i)
          } otherwise {
            when (io.in(storeIdx).bits.mask.asBools()(i % 8)) {
              updateInfo(storeIdx).newMask(i) := true.B
              updateInfo(storeIdx).newData(i) := io.in(storeIdx).bits.data(8 * (i % 8 + 1) - 1, 8 * (i % 8))
            } .otherwise {
              updateInfo(storeIdx).newMask(i) := updateInfo(formerIdx).newMask(i)
              updateInfo(storeIdx).newData(i) := updateInfo(formerIdx).newData(i)
            }
          }
        })
      }
    }
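
    // Note: step 0 merges a younger store with an older store issued in the same cycle when
    // both target the same cache line. The older request is marked isIgnored and the younger
    // one inherits its idx / tag / mask / data, overwriting only the bytes selected by its own
    // 8-bit store mask, so at most one write per line reaches the buffer this cycle.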

    // 1. search for existing lines
    for (bufIdx <- 0 until StoreBufferSize) {
      when (!updateInfo(storeIdx).isForward && (getTag(io.in(storeIdx).bits.addr) === cache(bufIdx).tag) && cache(bufIdx).valid && io.in(storeIdx).valid) {
        // mark this line as UPDATE
        updateInfo(storeIdx).isUpdated := true.B
        updateInfo(storeIdx).idx := bufIdx.U
        updateInfo(storeIdx).newTag := getTag(io.in(storeIdx).bits.addr)

        // update mask and data
        (0 until cacheMaskWidth).foreach(i => {
          when (i.U < getByteOffset(io.in(storeIdx).bits.addr).asUInt() ||
            i.U > (getByteOffset(io.in(storeIdx).bits.addr) | 7.U)) {
            updateInfo(storeIdx).newMask(i) := cache(bufIdx).mask(i)
            updateInfo(storeIdx).newData(i) := cache(bufIdx).data(i)
          } otherwise {
            when (io.in(storeIdx).bits.mask.asBools()(i % 8)) {
              updateInfo(storeIdx).newMask(i) := true.B
              updateInfo(storeIdx).newData(i) := io.in(storeIdx).bits.data(8 * (i % 8 + 1) - 1, 8 * (i % 8))
            } .otherwise {
              updateInfo(storeIdx).newMask(i) := cache(bufIdx).mask(i)
              updateInfo(storeIdx).newData(i) := cache(bufIdx).data(i)
            }
          }
        })
      }
    }

    // 2. target address not found in existing lines, try to insert into a new line
    val freeVec = WireInit(VecInit((0 until StoreBufferSize).map(i => cache(i).valid || busy(i.U, storeIdx))))
    val hasFree = !ParallelAND(freeVec)
    val nextFree = PriorityEncoder(freeVec.map(i => !i))
    // XSInfo("hasFree: %d, nextFreeIdx: %d\n", hasFree, nextFree)

    when (!updateInfo(storeIdx).isForward && !updateInfo(storeIdx).isUpdated && hasFree && io.in(storeIdx).valid) {
      updateInfo(storeIdx).isInserted := true.B
      updateInfo(storeIdx).idx := nextFree
      updateInfo(storeIdx).newTag := getTag(io.in(storeIdx).bits.addr)

      // set mask and data
      (0 until cacheMaskWidth).foreach(i => {
        when (i.U < getByteOffset(io.in(storeIdx).bits.addr).asUInt() ||
          i.U > (getByteOffset(io.in(storeIdx).bits.addr) | 7.U)) {
          updateInfo(storeIdx).newMask(i) := false.B
          updateInfo(storeIdx).newData(i) := 0.U
        } otherwise {
          when (io.in(storeIdx).bits.mask.asBools()(i % 8)) {
            updateInfo(storeIdx).newMask(i) := true.B
            updateInfo(storeIdx).newData(i) := io.in(storeIdx).bits.data(8 * (i % 8 + 1) - 1, 8 * (i % 8))
            // XSInfo("[%d] write data %x\n", i.U, io.in(storeIdx).bits.data(8 * (i % 8 + 1) - 1, 8 * (i % 8)))
          } .otherwise {
            updateInfo(storeIdx).newMask(i) := false.B
            updateInfo(storeIdx).newData(i) := 0.U
          }
        }
      })
    }
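
    // Note: despite its name, freeVec(i) is true when line i is NOT available, i.e. it is
    // already valid or is being claimed by an earlier request in this cycle (busy). hasFree
    // checks that at least one bit is low, and nextFree picks the lowest such index as the
    // insertion slot.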

    // 3. not enough space for this query
    when (!updateInfo(storeIdx).isForward && !updateInfo(storeIdx).isUpdated && !updateInfo(storeIdx).isInserted) {
      updateInfo(storeIdx).isIgnored := true.B
    }

    XSInfo(updateInfo(storeIdx).isUpdated && updateInfo(storeIdx).isInserted, "Error: one line is both updated and inserted!\n")

    if (storeIdx > 0)
      io.in(storeIdx).ready := io.in(storeIdx - 1).ready && (updateInfo(storeIdx).isUpdated || updateInfo(storeIdx).isInserted)
    else
      io.in(storeIdx).ready := updateInfo(storeIdx).isUpdated || updateInfo(storeIdx).isInserted
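
    // Note: ready is chained through the store ports, so port storeIdx is only ready when all
    // older ports are ready; retired stores therefore enter the sbuffer strictly in order.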

    when(io.in(storeIdx).fire()){
      when(updateInfo(storeIdx).isIgnored) {
        XSInfo("Ignore req#%d with paddr %x, mask %x, data %x\n", storeIdx.U, io.in(storeIdx).bits.addr, io.in(storeIdx).bits.mask, io.in(storeIdx).bits.data)

      // Update
      // ----------------------------------------
      } .elsewhen(updateInfo(storeIdx).isUpdated) {
        // clear lruCnt
        // cache(updateInfo(storeIdx).idx).lruCnt := 0.U
        lru.access(updateInfo(storeIdx).idx)
        lru_accessed(storeIdx) := true.B
        // update mask and data
        // cache(updateInfo(storeIdx).idx).data := updateInfo(storeIdx).newData
        cache(updateInfo(storeIdx).idx).data.zipWithIndex.foreach { case (int, i) =>
          int := updateInfo(storeIdx).newData(i)
        }
        // cache(updateInfo(storeIdx).idx).mask := updateInfo(storeIdx).newMask
        cache(updateInfo(storeIdx).idx).mask.zipWithIndex.foreach { case (int, i) =>
          int := updateInfo(storeIdx).newMask(i)
        }

        XSInfo("Update line#%d with tag %x, mask %x, data %x\n", updateInfo(storeIdx).idx, cache(updateInfo(storeIdx).idx).tag,
          io.in(storeIdx).bits.mask, io.in(storeIdx).bits.data)

      // Insert
      // ----------------------------------------
      } .elsewhen(updateInfo(storeIdx).isInserted) {
        // clear lruCnt
        // cache(updateInfo(storeIdx).idx).lruCnt := 0.U
        lru.access(updateInfo(storeIdx).idx)
        lru_accessed(storeIdx) := true.B
        // set valid
        cache(updateInfo(storeIdx).idx).valid := true.B
        // set tag
        cache(updateInfo(storeIdx).idx).tag := updateInfo(storeIdx).newTag
        // update mask and data
        // cache(updateInfo(storeIdx).idx).data := updateInfo(storeIdx).newData
        // cache(updateInfo(storeIdx).idx).mask := updateInfo(storeIdx).newMask
        cache(updateInfo(storeIdx).idx).data.zipWithIndex.foreach { case (int, i) =>
          int := updateInfo(storeIdx).newData(i)
        }
        cache(updateInfo(storeIdx).idx).mask.zipWithIndex.foreach { case (int, i) =>
          int := updateInfo(storeIdx).newMask(i)
        }

        XSInfo("Insert into line#%d with tag %x, mask: %x, data: %x, pa: %x\n", updateInfo(storeIdx).idx, getTag(io.in(storeIdx).bits.addr),
          io.in(storeIdx).bits.mask, io.in(storeIdx).bits.data, io.in(storeIdx).bits.addr)
      } // ignore UNCHANGED & EVICTED state
    }
  }

  // Write back to d-cache
  //--------------------------------------------------------------------------------------------------------------------
  val WriteBackPortCount = 2
  val FlushPort = 0 // flush has higher priority
  val EvictionPort = 1

  val wb_arb = Module(new Arbiter(UInt(), WriteBackPortCount))
  val wb_resp = WireInit(false.B)

  val waitingCacheLine: SBufferCacheLine = RegInit(0.U.asTypeOf(new SBufferCacheLine))

  // LRU eviction
  //-------------------------------------------------
  val validCnt: UInt = Wire(UInt((sBufferIndexWidth + 1).W))
  validCnt := PopCount((0 until StoreBufferSize).map(i => cache(i).valid))
  XSInfo("[ %d ] lines valid this cycle\n", validCnt)

  val oldestLineIdx: UInt = Wire(UInt(sBufferIndexWidth.W))
  oldestLineIdx := lru.way
  XSInfo("Least recently used #[ %d ] line\n", oldestLineIdx)

  // eviction state machine
  val e_wb_req :: e_wb_resp :: Nil = Enum(2)
  val eviction_state = RegInit(e_wb_req)

  wb_arb.io.in(EvictionPort).valid := false.B
  wb_arb.io.in(EvictionPort).bits := DontCare

  when (eviction_state === e_wb_req) {
    wb_arb.io.in(EvictionPort).valid := validCnt === StoreBufferSize.U && !waitingCacheLine.valid
    wb_arb.io.in(EvictionPort).bits := oldestLineIdx
    when (wb_arb.io.in(EvictionPort).fire()) {
      eviction_state := e_wb_resp
    }
  }

  val lru_miss = WireInit(false.B)
  when (eviction_state === e_wb_resp) {
    when (wb_resp) {
      lru.miss
      lru_miss := true.B
      eviction_state := e_wb_req
    }
  }
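
  // Note: eviction only requests a write-back when the buffer is completely full
  // (validCnt === StoreBufferSize) and no earlier write-back is still pending in
  // waitingCacheLine; the victim is the least recently used line reported by lru.way.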

  // Sbuffer flush
  //-------------------------------------------------
  // flush state machine
  val f_idle :: f_req :: f_wait_resp :: Nil = Enum(3)
  val f_state = RegInit(f_idle)
  val flush = io.flush
  // empty means there is no valid cache line in the sbuffer,
  // but a line being flushed to dcache may still be in flight
  val empty = validCnt === 0.U

  // sbuffer is flushed empty only when:
  // 1. there is no valid line in the sbuffer, and
  // 2. all cache lines waiting to be flushed have been written out
  flush.empty := empty && !waitingCacheLine.valid

  wb_arb.io.in(FlushPort).valid := f_state === f_req
  wb_arb.io.in(FlushPort).bits := PriorityEncoder((0 until StoreBufferSize).map(i => cache(i).valid))

  // we only expect a flush signal in the f_idle state
  assert(!(flush.valid && f_state =/= f_idle))

  switch (f_state) {
    is (f_idle) {
      when (flush.valid && !empty) { f_state := f_req }
    }
    is (f_req) {
      assert(!empty, "when flush, should not be empty")
      when (wb_arb.io.in(FlushPort).fire()) { f_state := f_wait_resp }
    }
    is (f_wait_resp) {
      when (wb_resp) {
        when (empty) { f_state := f_idle }
        .otherwise { f_state := f_req }
      }
    }
  }

  XSDebug(flush.valid, p"Receive flush. f_state:${f_state}\n")
  XSDebug(f_state =/= f_idle || flush.valid, p"f_state:${f_state} idx:${wb_arb.io.in(FlushPort).bits} In(${wb_arb.io.in(FlushPort).valid} ${wb_arb.io.in(FlushPort).ready}) wb_resp:${wb_resp}\n")
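
  // Note: a flush repeatedly selects the lowest-indexed valid line (PriorityEncoder above) and
  // sends it through FlushPort until validCnt reaches zero; FlushPort is arbiter input 0, so
  // flush traffic takes priority over LRU eviction.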

  // write back unit
  // ---------------------------------------------------------------
  val s_invalid :: s_dcache_req :: s_dcache_resp :: Nil = Enum(3)
  val state = RegInit(s_invalid)

  val wb_idx = Reg(UInt())

  val dcacheData = Wire(UInt(io.dcache.req.bits.data.getWidth.W))
  val dcacheMask = Wire(UInt(io.dcache.req.bits.mask.getWidth.W))
  dcacheData := DontCare
  dcacheMask := DontCare

  io.dcache.req.valid := false.B // needWriteToCache
  io.dcache.req.bits.addr := DontCare
  io.dcache.req.bits.data := dcacheData
  io.dcache.req.bits.mask := dcacheMask
  io.dcache.req.bits.cmd := MemoryOpConstants.M_XWR
  io.dcache.req.bits.meta := DontCare // NOT USED
  io.dcache.resp.ready := false.B

  wb_arb.io.out.ready := false.B

  // wbu state machine
  when (state === s_invalid) {
    wb_arb.io.out.ready := true.B
    when (wb_arb.io.out.fire()) {
      assert(cache(wb_arb.io.out.bits).valid)
      wb_idx := wb_arb.io.out.bits
      state := s_dcache_req
    }
  }
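
  // Note: the write-back unit serves one line at a time: s_invalid accepts a line index from
  // wb_arb, s_dcache_req issues the D-Cache write, and s_dcache_resp waits for the response
  // and pulses wb_resp for the eviction and flush state machines.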

  when (state === s_dcache_req) {
    // assert valid and send data + mask + line-aligned addr to d-cache
    io.dcache.req.valid := true.B
    io.dcache.req.bits.addr := getAddr(cache(wb_idx).tag)

    // prepare write data and write mask
    // first, we get data from cache
    dcacheData := cache(wb_idx).data.asUInt()
    dcacheMask := cache(wb_idx).mask.asUInt()

    // then, we try to merge any updates issued in this cycle
    for (i <- 0 until StorePipelineWidth) {
      // get data from updateInfo
      when (updateInfo(i).idx === wb_idx && updateInfo(i).isUpdated && io.in(i).valid) {
        dcacheData := updateInfo(i).newData.asUInt()
        dcacheMask := updateInfo(i).newMask.asUInt()
      }
    }

    when(io.dcache.req.fire()) {
      // save current req
      waitingCacheLine := cache(wb_idx)
      waitingCacheLine.data := dcacheData.asTypeOf(Vec(cacheMaskWidth, UInt(8.W)))
      waitingCacheLine.mask := dcacheMask.asTypeOf(Vec(cacheMaskWidth, Bool()))
      waitingCacheLine.valid := true.B

      cache(wb_idx).valid := false.B

      state := s_dcache_resp

      assert(cache(wb_idx).valid, "sbuffer cache line not valid\n")
      XSInfo("send req to dcache %x\n", wb_idx)
      XSDebug("[New D-Cache Req] idx: %d, addr: %x, mask: %x, data: %x\n",
        wb_idx, io.dcache.req.bits.addr, dcacheMask.asUInt(), dcacheData.asUInt())
    }
  }
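
  // Note: if a store updates the victim line in the same cycle the write-back request fires,
  // the freshly merged newData/newMask from updateInfo are sent instead of the stale registered
  // copy, so that store is not lost when the line is invalidated on fire.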

  when (state === s_dcache_resp) {
    io.dcache.resp.ready := true.B
    when(io.dcache.resp.fire()) {
      waitingCacheLine.valid := false.B
      wb_resp := true.B
      state := s_invalid
      XSInfo("recv resp from dcache. wb tag %x mask %x data %x\n", waitingCacheLine.tag, waitingCacheLine.mask.asUInt(), waitingCacheLine.data.asUInt())
    }

    // the inflight req
    XSDebug("[Pending Write Back] tag: %x, mask: %x, data: %x\n",
      waitingCacheLine.tag, waitingCacheLine.mask.asUInt(), waitingCacheLine.data.asUInt())
  }

  // loadForwardQuery
  //--------------------------------------------------------------------------------------------------------------------
  (0 until LoadPipelineWidth).map(loadIdx => {
    io.forward(loadIdx).forwardMask := VecInit(List.fill(instMaskWidth)(false.B))
    io.forward(loadIdx).forwardData := DontCare

    when(getTag(io.forward(loadIdx).paddr) === waitingCacheLine.tag && waitingCacheLine.valid) {
      (0 until XLEN / 8).foreach(i => {
        when (waitingCacheLine.mask(i.U + getByteOffset(io.forward(loadIdx).paddr)) && io.forward(loadIdx).mask(i)) {
          io.forward(loadIdx).forwardData(i) := waitingCacheLine.data(i.U + getByteOffset(io.forward(loadIdx).paddr))
          io.forward(loadIdx).forwardMask(i) := true.B
        }
      })
    }

    // data in the StoreBuffer should have higher priority than waitingCacheLine
    for (sBufIdx <- 0 until StoreBufferSize) {
      when(getTag(io.forward(loadIdx).paddr) === cache(sBufIdx).tag && cache(sBufIdx).valid) {
        // send data with mask in this line
        // this is not the full cache-line mask: the low bits of paddr select
        // the relevant part of the line
        // P.S. data in io.in will be manipulated by lsq
        (0 until XLEN / 8).foreach(i => {
          when (cache(sBufIdx).mask(i.U + getByteOffset(io.forward(loadIdx).paddr)) && io.forward(loadIdx).mask(i)) {
            io.forward(loadIdx).forwardData(i) := cache(sBufIdx).data(i.U + getByteOffset(io.forward(loadIdx).paddr))
            io.forward(loadIdx).forwardMask(i) := true.B
          }
        })

        when (io.forward(loadIdx).valid) {
          XSDebug("[ForwardReq] paddr: %x mask: %x pc: %x\n", io.forward(loadIdx).paddr, io.forward(loadIdx).mask, io.forward(loadIdx).pc)
          XSDebug("[Forwarding] forward-data: %x forward-mask: %x\n", io.forward(loadIdx).forwardData.asUInt(),
            io.forward(loadIdx).forwardMask.asUInt())
        }
      }
    }
  })
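
  // Note: both forwarding sources use last-connect semantics, so a hit in a valid sbuffer line
  // (second block) overrides a hit in waitingCacheLine (first block); buffered data is newer
  // than the in-flight write-back, matching the priority comment above.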

  // additional logs
  XSInfo(io.in(0).fire(), "ensbuffer addr 0x%x wdata 0x%x\n", io.in(0).bits.addr, io.in(0).bits.data)
  XSInfo(io.in(1).fire(), "ensbuffer addr 0x%x wdata 0x%x\n", io.in(1).bits.addr, io.in(1).bits.data)
  XSInfo(io.dcache.req.fire(), "desbuffer addr 0x%x wdata 0x%x\n", io.dcache.req.bits.addr, io.dcache.req.bits.data)

  // output cache line
  cache.zipWithIndex.foreach { case (line, i) => {
    XSDebug(line.valid, "[#%d line] Tag: %x, data: %x, mask: %x\n", i.U, line.tag, line.data.asUInt(), line.mask.asUInt())
  }}

  XSPerf("waitResp", waitingCacheLine.valid)
}